sha
stringlengths
40
40
text
stringlengths
1
13.4M
id
stringlengths
2
117
tags
listlengths
1
7.91k
created_at
stringlengths
25
25
metadata
stringlengths
2
875k
last_modified
stringlengths
25
25
arxiv
listlengths
0
25
languages
listlengths
0
7.91k
tags_str
stringlengths
17
159k
text_str
stringlengths
1
447k
text_lists
listlengths
0
352
processed_texts
listlengths
1
353
tokens_length
listlengths
1
353
input_texts
listlengths
1
40
da745575b4499249aec1a87dd6161205630d3463
# Sentiment Analysis Dataset This contains artificially constructed dataset labelled with their respective sentiment ## Dataset Description: - Number of Rows: 10,000 - Number of Columns: 2 - Column Names: 'Tweet', 'Emotion' - Description: This dataset contains tweets labeled with various emotions. Each row consists of a tweet and its corresponding emotion label, such as 'Anger', 'Shame', 'Sadness', or 'Fear'.
nikesh66/sentiment-detection-dataset
[ "language:en", "region:us" ]
2023-12-11T10:56:51+00:00
{"language": ["en"]}
2023-12-11T11:23:07+00:00
[]
[ "en" ]
TAGS #language-English #region-us
# Sentiment Analysis Dataset This contains artificially constructed dataset labelled with their respective sentiment ## Dataset Description: - Number of Rows: 10,000 - Number of Columns: 2 - Column Names: 'Tweet', 'Emotion' - Description: This dataset contains tweets labeled with various emotions. Each row consists of a tweet and its corresponding emotion label, such as 'Anger', 'Shame', 'Sadness', or 'Fear'.
[ "# Sentiment Analysis Dataset\nThis contains artificially constructed dataset labelled with their respective sentiment", "## Dataset Description:\n- Number of Rows: 10,000\n- Number of Columns: 2\n- Column Names: 'Tweet', 'Emotion'\n- Description: This dataset contains tweets labeled with various emotions. Each row consists of a tweet and its corresponding emotion label, such as 'Anger', 'Shame', 'Sadness', or 'Fear'." ]
[ "TAGS\n#language-English #region-us \n", "# Sentiment Analysis Dataset\nThis contains artificially constructed dataset labelled with their respective sentiment", "## Dataset Description:\n- Number of Rows: 10,000\n- Number of Columns: 2\n- Column Names: 'Tweet', 'Emotion'\n- Description: This dataset contains tweets labeled with various emotions. Each row consists of a tweet and its corresponding emotion label, such as 'Anger', 'Shame', 'Sadness', or 'Fear'." ]
[ 10, 22, 90 ]
[ "passage: TAGS\n#language-English #region-us \n# Sentiment Analysis Dataset\nThis contains artificially constructed dataset labelled with their respective sentiment## Dataset Description:\n- Number of Rows: 10,000\n- Number of Columns: 2\n- Column Names: 'Tweet', 'Emotion'\n- Description: This dataset contains tweets labeled with various emotions. Each row consists of a tweet and its corresponding emotion label, such as 'Anger', 'Shame', 'Sadness', or 'Fear'." ]
43496510f20c4c3f0f18970f0160acf6cbaef442
# Hate Speech Dataset This dataset contains artificially genrated tweets alongwith its label whether it is hatespeech or not ## Dataset Description - Number of Rows: 5,000 - Number of Columns: 2 - Column Names: 'Tweet', 'Hate Speech' - Description: This dataset comprises tweets with annotations indicating whether they contain hate speech or not. Each row has a tweet and a binary label ('yes' or 'no') denoting the presence of hate speech.
nikesh66/Hatespeech-Dataset
[ "language:en", "region:us" ]
2023-12-11T11:11:14+00:00
{"language": ["en"]}
2023-12-11T11:13:41+00:00
[]
[ "en" ]
TAGS #language-English #region-us
# Hate Speech Dataset This dataset contains artificially genrated tweets alongwith its label whether it is hatespeech or not ## Dataset Description - Number of Rows: 5,000 - Number of Columns: 2 - Column Names: 'Tweet', 'Hate Speech' - Description: This dataset comprises tweets with annotations indicating whether they contain hate speech or not. Each row has a tweet and a binary label ('yes' or 'no') denoting the presence of hate speech.
[ "# Hate Speech Dataset\nThis dataset contains artificially genrated tweets alongwith its label whether it is hatespeech or not", "## Dataset Description\n- Number of Rows: 5,000\n- Number of Columns: 2\n- Column Names: 'Tweet', 'Hate Speech'\n- Description: This dataset comprises tweets with annotations indicating whether they contain hate speech or not. Each row has a tweet and a binary label ('yes' or 'no') denoting the presence of hate speech." ]
[ "TAGS\n#language-English #region-us \n", "# Hate Speech Dataset\nThis dataset contains artificially genrated tweets alongwith its label whether it is hatespeech or not", "## Dataset Description\n- Number of Rows: 5,000\n- Number of Columns: 2\n- Column Names: 'Tweet', 'Hate Speech'\n- Description: This dataset comprises tweets with annotations indicating whether they contain hate speech or not. Each row has a tweet and a binary label ('yes' or 'no') denoting the presence of hate speech." ]
[ 10, 30, 88 ]
[ "passage: TAGS\n#language-English #region-us \n# Hate Speech Dataset\nThis dataset contains artificially genrated tweets alongwith its label whether it is hatespeech or not## Dataset Description\n- Number of Rows: 5,000\n- Number of Columns: 2\n- Column Names: 'Tweet', 'Hate Speech'\n- Description: This dataset comprises tweets with annotations indicating whether they contain hate speech or not. Each row has a tweet and a binary label ('yes' or 'no') denoting the presence of hate speech." ]
0c9bc18469ead579bbc540e4f765c1edaf07312c
# Slang Dataset It contains artificially generated slang data along with their label ## Dataset Descripton: - Number of Rows: 5,000 - Number of Columns: 2 - Column Names: 'Tweet', 'Sarcasm (yes/no)' - Description: This dataset features tweets labeled for sarcasm. Each tweet is accompanied by a label ('yes' or 'no') indicating whether the tweet is sarcastic.
nikesh66/Slang-Dataset
[ "size_categories:1K<n<10K", "language:en", "region:us" ]
2023-12-11T11:15:40+00:00
{"language": ["en"], "size_categories": ["1K<n<10K"]}
2023-12-11T11:18:32+00:00
[]
[ "en" ]
TAGS #size_categories-1K<n<10K #language-English #region-us
# Slang Dataset It contains artificially generated slang data along with their label ## Dataset Descripton: - Number of Rows: 5,000 - Number of Columns: 2 - Column Names: 'Tweet', 'Sarcasm (yes/no)' - Description: This dataset features tweets labeled for sarcasm. Each tweet is accompanied by a label ('yes' or 'no') indicating whether the tweet is sarcastic.
[ "# Slang Dataset\nIt contains artificially generated slang data along with their label", "## Dataset Descripton:\n\n- Number of Rows: 5,000\n- Number of Columns: 2\n- Column Names: 'Tweet', 'Sarcasm (yes/no)'\n- Description: This dataset features tweets labeled for sarcasm. Each tweet is accompanied by a label ('yes' or 'no') indicating whether the tweet is sarcastic." ]
[ "TAGS\n#size_categories-1K<n<10K #language-English #region-us \n", "# Slang Dataset\nIt contains artificially generated slang data along with their label", "## Dataset Descripton:\n\n- Number of Rows: 5,000\n- Number of Columns: 2\n- Column Names: 'Tweet', 'Sarcasm (yes/no)'\n- Description: This dataset features tweets labeled for sarcasm. Each tweet is accompanied by a label ('yes' or 'no') indicating whether the tweet is sarcastic." ]
[ 22, 18, 89 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #language-English #region-us \n# Slang Dataset\nIt contains artificially generated slang data along with their label## Dataset Descripton:\n\n- Number of Rows: 5,000\n- Number of Columns: 2\n- Column Names: 'Tweet', 'Sarcasm (yes/no)'\n- Description: This dataset features tweets labeled for sarcasm. Each tweet is accompanied by a label ('yes' or 'no') indicating whether the tweet is sarcastic." ]
a73648b5242e024dfc4b6ad23fe42df40533452f
## SummEval FR This dataset is a french translation of the original work [SummEval](https://github.com/Yale-LILY/SummEval). The translation was made using [DeepL](https://www.deepl.com) from English to French. We use this dataset for the french version of [MTEB](https://github.com/embeddings-benchmark/mteb) : The annotations include summaries generated by 16 models from 100 source news articles (1600 examples in total). Each of the summaries was annotated by 5 indepedent crowdsource workers and 3 independent experts (8 annotations in total). Summaries were evaluated across 4 dimensions: coherence, consistency, fluency, relevance. Each source news article comes with the original reference from the CNN/DailyMail dataset and 10 additional crowdsources reference summaries. For this dataset, the 3 expert annotations were averaged to get the human scores. source : https://huggingface.co/datasets/mteb/summeval ### Usage To use this dataset, you can run the following code : ```py from datasets import load_dataset dataset = load_dataset("lyon-nlp/summarization-summeval-fr-p2p", "test") ``` > Fabbri, A.R., Kryscinski, W., McCann, B., Socher, R., & Radev, D.R. (2020). SummEval: Re-evaluating Summarization Evaluation. Transactions of the Association for Computational Linguistics, 9, 391-409.
lyon-nlp/summarization-summeval-fr-p2p
[ "task_categories:summarization", "size_categories:n<1K", "language:fr", "license:apache-2.0", "region:us" ]
2023-12-11T11:17:49+00:00
{"language": ["fr"], "license": "apache-2.0", "size_categories": ["n<1K"], "task_categories": ["summarization"]}
2023-12-11T16:48:01+00:00
[]
[ "fr" ]
TAGS #task_categories-summarization #size_categories-n<1K #language-French #license-apache-2.0 #region-us
## SummEval FR This dataset is a french translation of the original work SummEval. The translation was made using DeepL from English to French. We use this dataset for the french version of MTEB : The annotations include summaries generated by 16 models from 100 source news articles (1600 examples in total). Each of the summaries was annotated by 5 indepedent crowdsource workers and 3 independent experts (8 annotations in total). Summaries were evaluated across 4 dimensions: coherence, consistency, fluency, relevance. Each source news article comes with the original reference from the CNN/DailyMail dataset and 10 additional crowdsources reference summaries. For this dataset, the 3 expert annotations were averaged to get the human scores. source : URL ### Usage To use this dataset, you can run the following code : > Fabbri, A.R., Kryscinski, W., McCann, B., Socher, R., & Radev, D.R. (2020). SummEval: Re-evaluating Summarization Evaluation. Transactions of the Association for Computational Linguistics, 9, 391-409.
[ "## SummEval FR\n\nThis dataset is a french translation of the original work SummEval. \nThe translation was made using DeepL from English to French.\n\n\nWe use this dataset for the french version of MTEB :\n\nThe annotations include summaries generated by 16 models from 100 source news articles (1600 examples in total). Each of the summaries was annotated by 5 indepedent crowdsource workers and 3 independent experts (8 annotations in total). Summaries were evaluated across 4 dimensions: coherence, consistency, fluency, relevance. Each source news article comes with the original reference from the CNN/DailyMail dataset and 10 additional crowdsources reference summaries.\n\nFor this dataset, the 3 expert annotations were averaged to get the human scores.\n\nsource : URL", "### Usage\n\nTo use this dataset, you can run the following code :\n\n\n\n> Fabbri, A.R., Kryscinski, W., McCann, B., Socher, R., & Radev, D.R. (2020). SummEval: Re-evaluating Summarization Evaluation. Transactions of the Association for Computational Linguistics, 9, 391-409." ]
[ "TAGS\n#task_categories-summarization #size_categories-n<1K #language-French #license-apache-2.0 #region-us \n", "## SummEval FR\n\nThis dataset is a french translation of the original work SummEval. \nThe translation was made using DeepL from English to French.\n\n\nWe use this dataset for the french version of MTEB :\n\nThe annotations include summaries generated by 16 models from 100 source news articles (1600 examples in total). Each of the summaries was annotated by 5 indepedent crowdsource workers and 3 independent experts (8 annotations in total). Summaries were evaluated across 4 dimensions: coherence, consistency, fluency, relevance. Each source news article comes with the original reference from the CNN/DailyMail dataset and 10 additional crowdsources reference summaries.\n\nFor this dataset, the 3 expert annotations were averaged to get the human scores.\n\nsource : URL", "### Usage\n\nTo use this dataset, you can run the following code :\n\n\n\n> Fabbri, A.R., Kryscinski, W., McCann, B., Socher, R., & Radev, D.R. (2020). SummEval: Re-evaluating Summarization Evaluation. Transactions of the Association for Computational Linguistics, 9, 391-409." ]
[ 40, 175, 91 ]
[ "passage: TAGS\n#task_categories-summarization #size_categories-n<1K #language-French #license-apache-2.0 #region-us \n## SummEval FR\n\nThis dataset is a french translation of the original work SummEval. \nThe translation was made using DeepL from English to French.\n\n\nWe use this dataset for the french version of MTEB :\n\nThe annotations include summaries generated by 16 models from 100 source news articles (1600 examples in total). Each of the summaries was annotated by 5 indepedent crowdsource workers and 3 independent experts (8 annotations in total). Summaries were evaluated across 4 dimensions: coherence, consistency, fluency, relevance. Each source news article comes with the original reference from the CNN/DailyMail dataset and 10 additional crowdsources reference summaries.\n\nFor this dataset, the 3 expert annotations were averaged to get the human scores.\n\nsource : URL### Usage\n\nTo use this dataset, you can run the following code :\n\n\n\n> Fabbri, A.R., Kryscinski, W., McCann, B., Socher, R., & Radev, D.R. (2020). SummEval: Re-evaluating Summarization Evaluation. Transactions of the Association for Computational Linguistics, 9, 391-409." ]
426f9adc95648482d56d83a58fac80bc5e5ef813
# Sarcasm Dataset This dataset contains sarcastic sentence along with their binary label ## Dataset Desciption: - Number of Rows: 5,000 - Number of Columns: 2 - Column Names: 'Tweet', 'Slang (yes/no)' - Description: The dataset contains tweets annotated for the use of slang. It includes a binary label ('yes' or 'no') indicating the presence of slang in each tweet.
nikesh66/Sarcasm-dataset
[ "size_categories:1K<n<10K", "language:en", "region:us" ]
2023-12-11T11:21:07+00:00
{"language": ["en"], "size_categories": ["1K<n<10K"]}
2023-12-11T11:22:43+00:00
[]
[ "en" ]
TAGS #size_categories-1K<n<10K #language-English #region-us
# Sarcasm Dataset This dataset contains sarcastic sentence along with their binary label ## Dataset Desciption: - Number of Rows: 5,000 - Number of Columns: 2 - Column Names: 'Tweet', 'Slang (yes/no)' - Description: The dataset contains tweets annotated for the use of slang. It includes a binary label ('yes' or 'no') indicating the presence of slang in each tweet.
[ "# Sarcasm Dataset\nThis dataset contains sarcastic sentence along with their binary label", "## Dataset Desciption:\n\n- Number of Rows: 5,000\n- Number of Columns: 2\n- Column Names: 'Tweet', 'Slang (yes/no)'\n- Description: The dataset contains tweets annotated for the use of slang. It includes a binary label ('yes' or 'no') indicating the presence of slang in each tweet." ]
[ "TAGS\n#size_categories-1K<n<10K #language-English #region-us \n", "# Sarcasm Dataset\nThis dataset contains sarcastic sentence along with their binary label", "## Dataset Desciption:\n\n- Number of Rows: 5,000\n- Number of Columns: 2\n- Column Names: 'Tweet', 'Slang (yes/no)'\n- Description: The dataset contains tweets annotated for the use of slang. It includes a binary label ('yes' or 'no') indicating the presence of slang in each tweet." ]
[ 22, 21, 86 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #language-English #region-us \n# Sarcasm Dataset\nThis dataset contains sarcastic sentence along with their binary label## Dataset Desciption:\n\n- Number of Rows: 5,000\n- Number of Columns: 2\n- Column Names: 'Tweet', 'Slang (yes/no)'\n- Description: The dataset contains tweets annotated for the use of slang. It includes a binary label ('yes' or 'no') indicating the presence of slang in each tweet." ]
771b8b9a7c6743f6b327ecd5f3a6a40fd6b78c17
# Dataset Card for "sys-human_db2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
collabteza/sys-human_db2
[ "region:us" ]
2023-12-11T11:31:52+00:00
{"dataset_info": {"features": [{"name": "System Prompt", "dtype": "string"}, {"name": "Human Prompt", "dtype": "string"}, {"name": "Output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 972089, "num_examples": 1530}], "download_size": 460352, "dataset_size": 972089}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-12-11T11:31:53+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sys-human_db2" More Information needed
[ "# Dataset Card for \"sys-human_db2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sys-human_db2\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"sys-human_db2\"\n\nMore Information needed" ]
a17d95c4af0a33a31591f187f1fe15232991397a
# Dataset Card for "raaga_dataset_v2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
zense-raaga-ai/raaga_dataset_v2
[ "region:us" ]
2023-12-11T11:42:23+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "RaagaNumber", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 22119715558.812, "num_examples": 86746}], "download_size": 29873744194, "dataset_size": 22119715558.812}}
2023-12-11T13:30:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for "raaga_dataset_v2" More Information needed
[ "# Dataset Card for \"raaga_dataset_v2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"raaga_dataset_v2\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"raaga_dataset_v2\"\n\nMore Information needed" ]
cb227a000b27d8db94198d1242e1607036136786
# Dataset Card for Israel-HAMAS war news ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Personal and Sensitive Information](#personal-and-sensitive-information) ## Dataset Description **Point of Contact:** [Alexander Akhterov](mailto:[email protected]) ### Dataset Summary The "Israel-HAMAS war news" dataset is an English-language dataset of news about Israel war against the terrorist organization - HAMAS that happened after "black Saturday" - massive murders of civilian Israeli people on the 7th of October 2023. We've accumulated news from the following sources: - BBC (live news) - from 2023-11-05 to 2023-11-18. Total: 805 - The Times of Israel (live news) - from 2023-10-07 to 2023-11-18. Total: 6581 - Al Jazeera (live news) - from 2023-11-04 to 2023-11-25. Total: 3297 - Al Mayadeen (articles from the site) - from 2023-10-08 to 2023-11-24. Total: 74 - WAFA "News" Agency (articles from the site section "Occupation")- from 2023-09-28 to 2023-11-26. Total: 1020 - CNN live news - from 2023-10-26 to 2023-11-26. Total: 1428 All news is collected using Beautiful Soup Python library. In the case of the BBC site, the WAFA site, and the Al Mayadeen site it was enough to use simple GET requests and in the other cases we used Silenium. ### Supported Tasks and Leaderboards `sentiment-classification`, `semantic-similarity-classification`: The idea behind the dataset is to fine-tune one of the LLMs to make a news sentiment analysis regarding the pro- and anti-Israel attitudes (but the collected dataset can be used also for other NLP tasks). One of the main issue is data labeling. To overcome it we suppose that almost all news from the Palestinian "news" agency "WAFA" and Lebanese agency "Al Mayadeen" has anti-Israel position. Most of the Al Jazeera news also tends to be against Israel. In return news by "The Times of Israel" is mostly pro-Israel. For example, the following piece of news carries an anti-Israel pattern: > KHAN YUNIS, Sunday, December 10, 2023 (WAFA) - At least 10 civilians were killed, mostly children, and dozens more were wounded > early this morning as Israeli warplanes bombed a residential house in Khan Yunis, south of the Gaza Strip, as the Israeli > aggression on the enclave enters its 65th day in a row. (WAFA "news" agency). On the contrary, the following information from "The Times of Israel" is pro-Israel: > Several thousand people demonstrate against antisemitism in Berlin as Germany grapples with a large increase in anti-Jewish > incidents following Hamas’s assault on Israel two months ago. Police estimate that around 3,200 people gathered in the rain > in the German capital, while organizers put the figure at 10,000, German news agency dpa reports. Participants in the protest, > titled “Never again is now,” march to the Brandenburg Gate. ### Languages The text in the dataset is in English since most news is in English. The associated BCP-47 code is en. ## Dataset Structure ### Data Instances ``` {'url': 'https://www.timesofisrael.com/liveblog_entry/man-arrested-in-death-of-jewish-protester-during-dueling-california-rallies-over-war/', 'datetime': '2023-11-16T18:36:15', 'title': 'Man arrested in death of Jewish protester during dueling California rallies over war', 'text': 'California authorities say they have arrested a man in connection with the death of a Jewish protester during demonstrations over the Israel-Hamas war.\nThe Ventura County Sheriff’s Office says the 50-year-old suspect was arrested today and will be booked into jail in the investigation of involuntary manslaughter — the unintentional killing of another person. The district attorney will decide whether there is enough evidence to bring a formal charge.\nPaul Kessler, 69, died early November 6 at a hospital following a November 5 confrontation with a pro-Palestinian demonstrator in Thousand Oaks, a suburb northwest of Los Angeles.\nSheriff Jim Fryhoff said subsequently that deputies determined Kessler had fallen backward and struck his head on the ground. The pro-Palestinian demonstrator stayed at the scene and told deputies he had called 911, Fryhoff said.', 'provider': 'The Times of Israel', 'source': 'site-live-news'} ``` ### Data Fields - "url" - link to the piece of news; - "datetime" - news date and time (YYYY-mm-ddTHH:MM:SS); - "title" - news title; - "text" - news text; - "provider" - news providers, can be 'BBC', 'The Times of Israel', 'Al Jazeera', 'Al Mayadeen', 'WAFA News Agency', and 'CNN'; - "source" - where news was collected. ## Dataset Creation ### Curation Rationale The "Israel-HAMAS war news" dataset was built as a solution to the data annotation problem for the sentiment analysis task regarding the pro- and anti-Israel attitudes. We've supposed that almost all news from the Palestinian "news" agency "WAFA", Lebanese agency "Al Mayadeen", and Qatari "Al Jazeera" has anti-Israel position. In return news by "The Times of Israel" is mostly pro-Israel. ### Source Data The official sites of the news agencies are the sources of the dataset: - The BBC - https://www.bbc.com/ - The Times of Israel - https://www.timesofisrael.com/ - Al Jazeera - https://www.aljazeera.com/ - Al Mayadeen - https://english.almayadeen.net/ - WAFA News Agency - https://english.wafa.ps/ - CNN - https://edition.cnn.com/ - #### Initial Data Collection and Normalization The data was obtained by web scraping of the appropriate sections of the news agencies' official sites. ### Annotations The dataset does not contain any additional annotations. ### Personal and Sensitive Information The news authors were removed.
aav-ds/Israel-HAMAS_war_news
[ "task_categories:text-classification", "task_categories:text-generation", "size_categories:10K<n<100K", "language:en", "region:us" ]
2023-12-11T11:54:53+00:00
{"language": ["en"], "size_categories": ["10K<n<100K"], "task_categories": ["text-classification", "text-generation"], "pretty_name": "Israel-HAMAS war news", "dataset_info": {"features": [{"name": "url", "dtype": "string"}, {"name": "datetime", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "provider", "dtype": "string"}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 14269346, "num_examples": 13103}], "download_size": 6081837, "dataset_size": 14269346}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-12-11T19:24:26+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #task_categories-text-generation #size_categories-10K<n<100K #language-English #region-us
# Dataset Card for Israel-HAMAS war news ## Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Dataset Creation - Curation Rationale - Source Data - Personal and Sensitive Information ## Dataset Description Point of Contact: Alexander Akhterov ### Dataset Summary The "Israel-HAMAS war news" dataset is an English-language dataset of news about Israel war against the terrorist organization - HAMAS that happened after "black Saturday" - massive murders of civilian Israeli people on the 7th of October 2023. We've accumulated news from the following sources: - BBC (live news) - from 2023-11-05 to 2023-11-18. Total: 805 - The Times of Israel (live news) - from 2023-10-07 to 2023-11-18. Total: 6581 - Al Jazeera (live news) - from 2023-11-04 to 2023-11-25. Total: 3297 - Al Mayadeen (articles from the site) - from 2023-10-08 to 2023-11-24. Total: 74 - WAFA "News" Agency (articles from the site section "Occupation")- from 2023-09-28 to 2023-11-26. Total: 1020 - CNN live news - from 2023-10-26 to 2023-11-26. Total: 1428 All news is collected using Beautiful Soup Python library. In the case of the BBC site, the WAFA site, and the Al Mayadeen site it was enough to use simple GET requests and in the other cases we used Silenium. ### Supported Tasks and Leaderboards 'sentiment-classification', 'semantic-similarity-classification': The idea behind the dataset is to fine-tune one of the LLMs to make a news sentiment analysis regarding the pro- and anti-Israel attitudes (but the collected dataset can be used also for other NLP tasks). One of the main issue is data labeling. To overcome it we suppose that almost all news from the Palestinian "news" agency "WAFA" and Lebanese agency "Al Mayadeen" has anti-Israel position. Most of the Al Jazeera news also tends to be against Israel. In return news by "The Times of Israel" is mostly pro-Israel. For example, the following piece of news carries an anti-Israel pattern: > KHAN YUNIS, Sunday, December 10, 2023 (WAFA) - At least 10 civilians were killed, mostly children, and dozens more were wounded > early this morning as Israeli warplanes bombed a residential house in Khan Yunis, south of the Gaza Strip, as the Israeli > aggression on the enclave enters its 65th day in a row. (WAFA "news" agency). On the contrary, the following information from "The Times of Israel" is pro-Israel: > Several thousand people demonstrate against antisemitism in Berlin as Germany grapples with a large increase in anti-Jewish > incidents following Hamas’s assault on Israel two months ago. Police estimate that around 3,200 people gathered in the rain > in the German capital, while organizers put the figure at 10,000, German news agency dpa reports. Participants in the protest, > titled “Never again is now,” march to the Brandenburg Gate. ### Languages The text in the dataset is in English since most news is in English. The associated BCP-47 code is en. ## Dataset Structure ### Data Instances ### Data Fields - "url" - link to the piece of news; - "datetime" - news date and time (YYYY-mm-ddTHH:MM:SS); - "title" - news title; - "text" - news text; - "provider" - news providers, can be 'BBC', 'The Times of Israel', 'Al Jazeera', 'Al Mayadeen', 'WAFA News Agency', and 'CNN'; - "source" - where news was collected. ## Dataset Creation ### Curation Rationale The "Israel-HAMAS war news" dataset was built as a solution to the data annotation problem for the sentiment analysis task regarding the pro- and anti-Israel attitudes. We've supposed that almost all news from the Palestinian "news" agency "WAFA", Lebanese agency "Al Mayadeen", and Qatari "Al Jazeera" has anti-Israel position. In return news by "The Times of Israel" is mostly pro-Israel. ### Source Data The official sites of the news agencies are the sources of the dataset: - The BBC - URL - The Times of Israel - URL - Al Jazeera - URL - Al Mayadeen - URL - WAFA News Agency - URL - CNN - URL - #### Initial Data Collection and Normalization The data was obtained by web scraping of the appropriate sections of the news agencies' official sites. ### Annotations The dataset does not contain any additional annotations. ### Personal and Sensitive Information The news authors were removed.
[ "# Dataset Card for Israel-HAMAS war news", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Personal and Sensitive Information", "## Dataset Description\n\nPoint of Contact: Alexander Akhterov", "### Dataset Summary\n\nThe \"Israel-HAMAS war news\" dataset is an English-language dataset of news about Israel war against the terrorist organization - \nHAMAS that happened after \"black Saturday\" - massive murders of civilian Israeli people on the 7th of October 2023.\n\nWe've accumulated news from the following sources:\n- BBC (live news) - from 2023-11-05 to 2023-11-18. Total: 805\n- The Times of Israel (live news) - from 2023-10-07 to 2023-11-18. Total: 6581\n- Al Jazeera (live news) - from 2023-11-04 to 2023-11-25. Total: 3297\n- Al Mayadeen (articles from the site) - from 2023-10-08 to 2023-11-24. Total: 74\n- WAFA \"News\" Agency (articles from the site section \"Occupation\")- from 2023-09-28 to 2023-11-26. Total: 1020\n- CNN live news - from 2023-10-26 to 2023-11-26. Total: 1428\n\nAll news is collected using Beautiful Soup Python library. In the case of the BBC site, the WAFA site, and the Al Mayadeen site \nit was enough to use simple GET requests and in the other cases we used Silenium.", "### Supported Tasks and Leaderboards\n\n'sentiment-classification', 'semantic-similarity-classification': The idea behind the dataset is to fine-tune one of the LLMs \nto make a news sentiment analysis regarding the pro- and anti-Israel attitudes (but the collected dataset can be used also \nfor other NLP tasks). One of the main issue is data labeling. To overcome it we suppose that almost all news from the Palestinian \n\"news\" agency \"WAFA\" and Lebanese agency \"Al Mayadeen\" has anti-Israel position. Most of the Al Jazeera news also tends to be against \nIsrael. In return news by \"The Times of Israel\" is mostly pro-Israel. \n\nFor example, the following piece of news carries an anti-Israel pattern:\n> KHAN YUNIS, Sunday, December 10, 2023 (WAFA) - At least 10 civilians were killed, mostly children, and dozens more were wounded\n> early this morning as Israeli warplanes bombed a residential house in Khan Yunis, south of the Gaza Strip, as the Israeli\n> aggression on the enclave enters its 65th day in a row. (WAFA \"news\" agency).\n\nOn the contrary, the following information from \"The Times of Israel\" is pro-Israel:\n\n> Several thousand people demonstrate against antisemitism in Berlin as Germany grapples with a large increase in anti-Jewish\n> incidents following Hamas’s assault on Israel two months ago. Police estimate that around 3,200 people gathered in the rain\n> in the German capital, while organizers put the figure at 10,000, German news agency dpa reports. Participants in the protest,\n> titled “Never again is now,” march to the Brandenburg Gate.", "### Languages\n\nThe text in the dataset is in English since most news is in English. The associated BCP-47 code is en.", "## Dataset Structure", "### Data Instances", "### Data Fields\n\n- \"url\" - link to the piece of news;\n- \"datetime\" - news date and time (YYYY-mm-ddTHH:MM:SS);\n- \"title\" - news title;\n- \"text\" - news text;\n- \"provider\" - news providers, can be 'BBC', 'The Times of Israel', 'Al Jazeera', 'Al Mayadeen', 'WAFA News Agency', and 'CNN';\n- \"source\" - where news was collected.", "## Dataset Creation", "### Curation Rationale\n\nThe \"Israel-HAMAS war news\" dataset was built as a solution to the data annotation problem for the sentiment analysis task \nregarding the pro- and anti-Israel attitudes. We've supposed that almost all news from the Palestinian \"news\" agency \"WAFA\", \nLebanese agency \"Al Mayadeen\", and Qatari \"Al Jazeera\" has anti-Israel position. In return news by \"The Times of Israel\" \nis mostly pro-Israel.", "### Source Data\n\nThe official sites of the news agencies are the sources of the dataset:\n\n- The BBC - URL\n- The Times of Israel - URL\n- Al Jazeera - URL\n- Al Mayadeen - URL\n- WAFA News Agency - URL\n- CNN - URL\n-", "#### Initial Data Collection and Normalization\n\nThe data was obtained by web scraping of the appropriate sections of the news agencies' official sites.", "### Annotations\n\nThe dataset does not contain any additional annotations.", "### Personal and Sensitive Information\n\nThe news authors were removed." ]
[ "TAGS\n#task_categories-text-classification #task_categories-text-generation #size_categories-10K<n<100K #language-English #region-us \n", "# Dataset Card for Israel-HAMAS war news", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Personal and Sensitive Information", "## Dataset Description\n\nPoint of Contact: Alexander Akhterov", "### Dataset Summary\n\nThe \"Israel-HAMAS war news\" dataset is an English-language dataset of news about Israel war against the terrorist organization - \nHAMAS that happened after \"black Saturday\" - massive murders of civilian Israeli people on the 7th of October 2023.\n\nWe've accumulated news from the following sources:\n- BBC (live news) - from 2023-11-05 to 2023-11-18. Total: 805\n- The Times of Israel (live news) - from 2023-10-07 to 2023-11-18. Total: 6581\n- Al Jazeera (live news) - from 2023-11-04 to 2023-11-25. Total: 3297\n- Al Mayadeen (articles from the site) - from 2023-10-08 to 2023-11-24. Total: 74\n- WAFA \"News\" Agency (articles from the site section \"Occupation\")- from 2023-09-28 to 2023-11-26. Total: 1020\n- CNN live news - from 2023-10-26 to 2023-11-26. Total: 1428\n\nAll news is collected using Beautiful Soup Python library. In the case of the BBC site, the WAFA site, and the Al Mayadeen site \nit was enough to use simple GET requests and in the other cases we used Silenium.", "### Supported Tasks and Leaderboards\n\n'sentiment-classification', 'semantic-similarity-classification': The idea behind the dataset is to fine-tune one of the LLMs \nto make a news sentiment analysis regarding the pro- and anti-Israel attitudes (but the collected dataset can be used also \nfor other NLP tasks). One of the main issue is data labeling. To overcome it we suppose that almost all news from the Palestinian \n\"news\" agency \"WAFA\" and Lebanese agency \"Al Mayadeen\" has anti-Israel position. Most of the Al Jazeera news also tends to be against \nIsrael. In return news by \"The Times of Israel\" is mostly pro-Israel. \n\nFor example, the following piece of news carries an anti-Israel pattern:\n> KHAN YUNIS, Sunday, December 10, 2023 (WAFA) - At least 10 civilians were killed, mostly children, and dozens more were wounded\n> early this morning as Israeli warplanes bombed a residential house in Khan Yunis, south of the Gaza Strip, as the Israeli\n> aggression on the enclave enters its 65th day in a row. (WAFA \"news\" agency).\n\nOn the contrary, the following information from \"The Times of Israel\" is pro-Israel:\n\n> Several thousand people demonstrate against antisemitism in Berlin as Germany grapples with a large increase in anti-Jewish\n> incidents following Hamas’s assault on Israel two months ago. Police estimate that around 3,200 people gathered in the rain\n> in the German capital, while organizers put the figure at 10,000, German news agency dpa reports. Participants in the protest,\n> titled “Never again is now,” march to the Brandenburg Gate.", "### Languages\n\nThe text in the dataset is in English since most news is in English. The associated BCP-47 code is en.", "## Dataset Structure", "### Data Instances", "### Data Fields\n\n- \"url\" - link to the piece of news;\n- \"datetime\" - news date and time (YYYY-mm-ddTHH:MM:SS);\n- \"title\" - news title;\n- \"text\" - news text;\n- \"provider\" - news providers, can be 'BBC', 'The Times of Israel', 'Al Jazeera', 'Al Mayadeen', 'WAFA News Agency', and 'CNN';\n- \"source\" - where news was collected.", "## Dataset Creation", "### Curation Rationale\n\nThe \"Israel-HAMAS war news\" dataset was built as a solution to the data annotation problem for the sentiment analysis task \nregarding the pro- and anti-Israel attitudes. We've supposed that almost all news from the Palestinian \"news\" agency \"WAFA\", \nLebanese agency \"Al Mayadeen\", and Qatari \"Al Jazeera\" has anti-Israel position. In return news by \"The Times of Israel\" \nis mostly pro-Israel.", "### Source Data\n\nThe official sites of the news agencies are the sources of the dataset:\n\n- The BBC - URL\n- The Times of Israel - URL\n- Al Jazeera - URL\n- Al Mayadeen - URL\n- WAFA News Agency - URL\n- CNN - URL\n-", "#### Initial Data Collection and Normalization\n\nThe data was obtained by web scraping of the appropriate sections of the news agencies' official sites.", "### Annotations\n\nThe dataset does not contain any additional annotations.", "### Personal and Sensitive Information\n\nThe news authors were removed." ]
[ 44, 11, 62, 12, 267, 392, 29, 6, 6, 118, 5, 106, 57, 32, 17, 15 ]
[ "passage: TAGS\n#task_categories-text-classification #task_categories-text-generation #size_categories-10K<n<100K #language-English #region-us \n# Dataset Card for Israel-HAMAS war news## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Personal and Sensitive Information## Dataset Description\n\nPoint of Contact: Alexander Akhterov### Dataset Summary\n\nThe \"Israel-HAMAS war news\" dataset is an English-language dataset of news about Israel war against the terrorist organization - \nHAMAS that happened after \"black Saturday\" - massive murders of civilian Israeli people on the 7th of October 2023.\n\nWe've accumulated news from the following sources:\n- BBC (live news) - from 2023-11-05 to 2023-11-18. Total: 805\n- The Times of Israel (live news) - from 2023-10-07 to 2023-11-18. Total: 6581\n- Al Jazeera (live news) - from 2023-11-04 to 2023-11-25. Total: 3297\n- Al Mayadeen (articles from the site) - from 2023-10-08 to 2023-11-24. Total: 74\n- WAFA \"News\" Agency (articles from the site section \"Occupation\")- from 2023-09-28 to 2023-11-26. Total: 1020\n- CNN live news - from 2023-10-26 to 2023-11-26. Total: 1428\n\nAll news is collected using Beautiful Soup Python library. In the case of the BBC site, the WAFA site, and the Al Mayadeen site \nit was enough to use simple GET requests and in the other cases we used Silenium." ]
ae6b0c54fca9fa26096f7de175c747e4b262e01a
# Dataset Card for Math-Shepherd Project Page: [Math-Shepherd](https://rain-motion-6ec.notion.site/Math-Shepherd-A-Label-Free-Step-by-Step-Verifier-for-LLMs-in-Mathematical-Reasoning-41b6e73c860840e08697d347f8889bac#08e86c6d44c4452ba0b78c7aaea5f4f7) Paper: https://arxiv.org/pdf/2312.08935.pdf # Data Loading ``` from datasets import load_dataset dataset = load_dataset("peiyi9979/Math-Shepherd") ``` # Data Instance Every instance consists of three data fields: "input," "label," and "task". 1. "input": problem + step-by-step solution, e.g., ``` If Buzz bought a pizza with 78 slices at a restaurant and then decided to share it with the waiter in the ratio of 5:8, with Buzz's ratio being 5, what's twenty less the number of slices of pizza that the waiter ate? Step 1: The total ratio representing the pizza is 5+8 = <<5+8=13>>13. ки Step 2: The waiter ate 13 x 8 / 13 = <<13*8/13=6>>6 slices of the pizza. ки Step 3: Buzz ate 78 - 6 = <<78-6=72>>72 slices of the pizza. ки Step 4: The waiter ate 20 less than the number of slices that Buzz ate which is 72 - 20 = 52. ки Step 5: The waiter ate 52 slices of the pizza. The answer is: 52 ки ``` 2. "label": problem + step-by-step solution with automatic label, e.g., ``` If Buzz bought a pizza with 78 slices at a restaurant and then decided to share it with the waiter in the ratio of 5:8, with Buzz's ratio being 5, what's twenty less the number of slices of pizza that the waiter ate? Step 1: The total ratio representing the pizza is 5+8 = <<5+8=13>>13. + Step 2: The waiter ate 13 x 8 / 13 = <<13*8/13=6>>6 slices of the pizza. - Step 3: Buzz ate 78 - 6 = <<78-6=72>>72 slices of the pizza. - Step 4: The waiter ate 20 less than the number of slices that Buzz ate which is 72 - 20 = 52. - Step 5: The waiter ate 52 slices of the pizza. The answer is: 52 - ``` 3. "task": `GSM8K` or `MATH`. NOTE: "`ки`" serves as a unique token denoting the position for predicting the step score. "`+`" signifies a good step, as it has the potential to lead towards the correct answer. "`-`" denotes a bad step. When we train PRMs, we only compute the loss of the positions of `ки`. # Models: We utilized internal code for step-wise PPO training, which cannot be open-sourced. We hope for your understanding. We provide the checkpoints of SFT, PRM, and RL models to help everyone reproduce our results. - Mistral-7b-sft: https://huggingface.co/peiyi9979/mistral-7b-sft - Mistral-7b-prm: https://huggingface.co/peiyi9979/math-shepherd-mistral-7b-prm - Mistral-7b-rl: https://huggingface.co/peiyi9979/math-shepherd-mistral-7b-rl
peiyi9979/Math-Shepherd
[ "prm", "synthesized data", "arxiv:2312.08935", "region:us" ]
2023-12-11T12:04:14+00:00
{"tags": ["prm", "synthesized data"]}
2024-01-03T06:13:49+00:00
[ "2312.08935" ]
[]
TAGS #prm #synthesized data #arxiv-2312.08935 #region-us
# Dataset Card for Math-Shepherd Project Page: Math-Shepherd Paper: URL # Data Loading # Data Instance Every instance consists of three data fields: "input," "label," and "task". 1. "input": problem + step-by-step solution, e.g., 2. "label": problem + step-by-step solution with automatic label, e.g., 3. "task": 'GSM8K' or 'MATH'. NOTE: "'ки'" serves as a unique token denoting the position for predicting the step score. "'+'" signifies a good step, as it has the potential to lead towards the correct answer. "'-'" denotes a bad step. When we train PRMs, we only compute the loss of the positions of 'ки'. # Models: We utilized internal code for step-wise PPO training, which cannot be open-sourced. We hope for your understanding. We provide the checkpoints of SFT, PRM, and RL models to help everyone reproduce our results. - Mistral-7b-sft: URL - Mistral-7b-prm: URL - Mistral-7b-rl: URL
[ "# Dataset Card for Math-Shepherd\nProject Page: Math-Shepherd\n\nPaper: URL", "# Data Loading", "# Data Instance\nEvery instance consists of three data fields: \"input,\" \"label,\" and \"task\".\n\n1. \"input\": problem + step-by-step solution, e.g.,\n\n\n2. \"label\": problem + step-by-step solution with automatic label, e.g.,\n\n\n3. \"task\": 'GSM8K' or 'MATH'.\n\nNOTE:\n\n\"'ки'\" serves as a unique token denoting the position for predicting the step score.\n\n\"'+'\" signifies a good step, as it has the potential to lead towards the correct answer.\n\n\"'-'\" denotes a bad step.\n\nWhen we train PRMs, we only compute the loss of the positions of 'ки'.", "# Models:\nWe utilized internal code for step-wise PPO training, which cannot be open-sourced. We hope for your understanding. We provide the checkpoints of SFT, PRM, and RL models to help everyone reproduce our results.\n\n- Mistral-7b-sft: URL\n- Mistral-7b-prm: URL\n- Mistral-7b-rl: URL" ]
[ "TAGS\n#prm #synthesized data #arxiv-2312.08935 #region-us \n", "# Dataset Card for Math-Shepherd\nProject Page: Math-Shepherd\n\nPaper: URL", "# Data Loading", "# Data Instance\nEvery instance consists of three data fields: \"input,\" \"label,\" and \"task\".\n\n1. \"input\": problem + step-by-step solution, e.g.,\n\n\n2. \"label\": problem + step-by-step solution with automatic label, e.g.,\n\n\n3. \"task\": 'GSM8K' or 'MATH'.\n\nNOTE:\n\n\"'ки'\" serves as a unique token denoting the position for predicting the step score.\n\n\"'+'\" signifies a good step, as it has the potential to lead towards the correct answer.\n\n\"'-'\" denotes a bad step.\n\nWhen we train PRMs, we only compute the loss of the positions of 'ки'.", "# Models:\nWe utilized internal code for step-wise PPO training, which cannot be open-sourced. We hope for your understanding. We provide the checkpoints of SFT, PRM, and RL models to help everyone reproduce our results.\n\n- Mistral-7b-sft: URL\n- Mistral-7b-prm: URL\n- Mistral-7b-rl: URL" ]
[ 24, 21, 3, 171, 85 ]
[ "passage: TAGS\n#prm #synthesized data #arxiv-2312.08935 #region-us \n# Dataset Card for Math-Shepherd\nProject Page: Math-Shepherd\n\nPaper: URL# Data Loading# Data Instance\nEvery instance consists of three data fields: \"input,\" \"label,\" and \"task\".\n\n1. \"input\": problem + step-by-step solution, e.g.,\n\n\n2. \"label\": problem + step-by-step solution with automatic label, e.g.,\n\n\n3. \"task\": 'GSM8K' or 'MATH'.\n\nNOTE:\n\n\"'ки'\" serves as a unique token denoting the position for predicting the step score.\n\n\"'+'\" signifies a good step, as it has the potential to lead towards the correct answer.\n\n\"'-'\" denotes a bad step.\n\nWhen we train PRMs, we only compute the loss of the positions of 'ки'.# Models:\nWe utilized internal code for step-wise PPO training, which cannot be open-sourced. We hope for your understanding. We provide the checkpoints of SFT, PRM, and RL models to help everyone reproduce our results.\n\n- Mistral-7b-sft: URL\n- Mistral-7b-prm: URL\n- Mistral-7b-rl: URL" ]
605dfde55d1a10a685094c0706b9324f71236687
# Dataset Card for "sys-human_db3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
collabteza/sys-human_db3
[ "region:us" ]
2023-12-11T12:04:45+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "System Prompt", "dtype": "string"}, {"name": "Human Prompt", "dtype": "string"}, {"name": "Output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1092224, "num_examples": 1354}], "download_size": 481074, "dataset_size": 1092224}}
2023-12-11T16:34:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sys-human_db3" More Information needed
[ "# Dataset Card for \"sys-human_db3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sys-human_db3\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"sys-human_db3\"\n\nMore Information needed" ]
62638c1f77a0842b630940aeb2a42f04572a2f40
# Dataset Card for "voxpopuli_windows_cs" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Predict9731/voxpopuli_windows_cs
[ "region:us" ]
2023-12-11T12:19:28+00:00
{"dataset_info": {"features": [{"name": "audio_id", "dtype": "string"}, {"name": "language", "dtype": {"class_label": {"names": {"0": "en", "1": "de", "2": "fr", "3": "es", "4": "pl", "5": "it", "6": "ro", "7": "hu", "8": "cs", "9": "nl", "10": "fi", "11": "hr", "12": "sk", "13": "sl", "14": "et", "15": "lt", "16": "en_accented"}}}}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "raw_text", "dtype": "string"}, {"name": "normalized_text", "dtype": "string"}, {"name": "gender", "dtype": "string"}, {"name": "speaker_id", "dtype": "string"}, {"name": "is_gold_transcript", "dtype": "bool"}, {"name": "accent", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 6549063392.628, "num_examples": 18902}], "download_size": 10449462424, "dataset_size": 6549063392.628}}
2023-12-11T12:28:22+00:00
[]
[]
TAGS #region-us
# Dataset Card for "voxpopuli_windows_cs" More Information needed
[ "# Dataset Card for \"voxpopuli_windows_cs\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"voxpopuli_windows_cs\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"voxpopuli_windows_cs\"\n\nMore Information needed" ]
185eb350a75261ca19f148f2100a1c9d925d9e1b
# MT-Bench-French This is a French version of [MT-Bench](https://arxiv.org/abs/2306.05685), created to evaluate the multi-turn conversation and instruction-following capabilities of LLMs. Similar to its original version, MT-Bench-French comprises 80 high-quality, multi-turn questions spanning eight main categories. All questions have undergone translation into French and **thorough human review** to guarantee the use of suitable and authentic wording, meaningful content for assessing LLMs' capabilities in the French language, and coherence between questions within the same conversation. For certain challenging tasks (e.g., math, reasoning, and coding), a reference answer is included in the judge prompt to assist in evaluating responses from LLMs, referred to as a *reference-guided judge*. Notably, these reference answers are also generated by the LLM judge (GPT-4). In our version, we took an extra step of reviewing and correcting these reference answers by human. This was done to address several concerns: 1) GPT-4 exhibited a decline in performance when transitioning from English to French. The responses generated for complex tasks did not meet the required standards to function as reference answers. 2) Human-corrected reference answer helps mitigate the bias in evaluating LLMs. However, it's important to note that some degree of bias still persists. *Please note that although this dataset provides a convenient way to evaluate LLMs, it shouldn't be regarded as the ultimate benchmark for such assessments, given the inherent limitations of both the dataset and the methodology.* ## News - [2024/01/26]: Added `mistral-small` with thanks to @thomlevy ## Examples Here are a few examples to highlight the distinction: #### Choosing appropriate and authentic wording *Original question:* ``` Given the following data, identify the company with the highest profit in 2021 and provide its CEO's name: ... Which company had the highest profit margin (profit/revenue ratio)? ``` *Translated question:* ``` Étant donné les informations suivantes, identifie le nom de l'entreprise qui a réalisé le plus gros bénéfice en 2021 et fournis le nom de son PDG : ... Quelle entreprise avait la marge bénéficiaire la plus élevée (rapport bénéfice/chiffre d'affaires) ? ``` Certain translators translate "profit/revenue ratio" as "rapport bénéfice/revenu", but the accurate translation should be "rapport bénéfice/chiffre d'affaires". #### Following original question format *Original question:* ``` Can you change the ratings from numbers to letters? Capital letters MUST be used when writing the names of phones. ``` *Translated question:* ``` Pouvez-vous changer les notes de chiffres en lettres ? Les noms des téléphones doivent être écrits IMPÉRATIVEMENT en lettres majuscules. ``` We maintain the original question's format, highlighting "MUST" in uppercase ("IMPÉRATIVEMENT" in French) to grab the attention of the language model. Additionally, we uphold other formats, including indentation and line breaks, in the translated version. #### Avoiding unnecessary translation of Anglicisms *Original question:* ``` A tech startup invests $8000 in software development in the first year... ``` *Translated question:* ``` Une startup technologique investit 8000 euros dans le développement de logiciels la première année... ``` Some English terms were kept as-is, as they are commonly used in French. #### Mixing formal and informal pronouns for diversity *Translated question 1:* ``` Veuillez assumer le rôle d'un coach relationnel. Vous recevrez des détails sur deux personnes en conflit, et votre tâche sera de proposer des suggestions pour résoudre leurs problèmes et combler le fossé entre eux. ``` *Translated question 2:* ``` Crée un plan de leçon intégrant des techniques de théâtre ``` *Translated question 3:* ``` Est-ce que tu aimes danser ? Peux-tu m'apprendre ? ``` #### Ensuring meaningfulness in the translated questions *Original question:* ``` Edit the following paragraph to correct any grammatical errors: She didn't remembre where is her purse, so I thinks its in the car but he's say it's on kitchen table but he are not sure, and then they asked me to looking for it, she's say, "Can you?", and I responds with, "Maybe, but ain't no sure," and he not heard me, and, "What?", he asks, "Did you found it?". ``` *Translated question:* ``` Editez le paragraphe suivant pour corriger toute erreur grammaticale : Elle ne se souvenaeint pas où été son sac à main, donc je penses qu'il est dans le voiture, mais il à dis qu'il est sur table du cuisine, bien qu'il n'en soient pas sûre. Ensuite, ils m'ont demandé de le cherchez. "Tu peut ?", elle a demandée, et j'ai répond, "Peut être, mais ne suis pas sûr." Il ne ma entendu, et il a demander "Quoi ? Tu l'a trouvés ?" ``` Some translators might rectify grammatical errors in the sentence. In contrast, we translated it and purposely introduced certain common errors in French. #### Guaranteeing the translated questions are suitable for evaluating LLMs in French *Original question:* ``` Please assume the role of an English translator, tasked with correcting and enhancing spelling and language. Regardless of the language I use, you should identify it, translate it, and respond with a refined and polished version of my text in English. Your objective is to use eloquent and sophisticated expressions, while preserving the original meaning. Focus solely on providing corrections and improvements. My first request is "衣带渐宽终不悔 为伊消得人憔悴". ``` *Translated question:* ``` Joue le rôle d'un traducteur francophone que l'on a chargé de corriger et d'embellir l'orthographe et l'expression de mon travail. Indépendamment de la langue utilisée, identifie-la, traduis-la et produis une version française plus raffinée de mon texte. Ton but est d'employer des expressions éloquentes et sophistiquées tout en préservant le sens original. Contente-toi de fournir des corrections et des améliorations. Ma première requête est la suivante : "衣带渐宽终不悔 为伊消得人憔悴". ``` Given that we are evaluating LLMs for the French language, we request the model to translate a sentence into French instead of English. #### Miscellaneous *Original question:* ``` "Compose an engaging travel blog post about a recent trip to Hawaii, highlighting cultural experiences and must-see attractions. ``` *Translated question:* ``` Rédigez un blog de voyage captivant sur un voyage récent en Corse, en mettant en évidence les expériences culturelles et les attractions incontournables. ``` We replaced the destination 'Hawaii' with 'Corse' since it is more aligned with French culture, along with other changes like substituting "dollar" with "euro". ## How to evaluate custom models Please refer to the [instructions](https://github.com/lm-sys/FastChat/tree/main/fastchat/llm_judge#mt-bench) of LMSYS for guidance on evaluating custom models. ## Limitations This dataset serves the purpose of efficiently evaluating the performance of LLMs in the French language. However, it's important to acknowledge its limitations, which include: - GPT-4's inherent bias in assessing LLM responses, particularly in aspects like position, verbosity, and self-enhancement. Additionally, it exhibits limitations in effectively grading challenging tasks, such as those requiring advanced mathematical and reasoning capabilities. - The dataset encompasses only 10 tasks (20 questions) per category, which might not provide a comprehensive representation of the full capabilities of LLMs. - A majority of the questions were translated from their original English version, potentially impacting the dataset's ability to accurately reflect performance in the context of French culture. ## Acknowledgment - [LMSYS](https://lmsys.org) for creating the original dataset - [Audrey Cornu](https://www.linkedin.com/in/audrey-cornu-0b9808142), [Tiphaine Fievet](https://www.linkedin.com/in/tiphaine-fievet-84b3431b8), [Amira Guesmi](https://www.linkedin.com/in/amira-guesmi-4a435684), [Cindy Perrigault](https://www.linkedin.com/in/cindy-perrigault), [Hayoung Seo](https://www.linkedin.com/in/hayoung-seo-180b26200) (in alphabetical order), and myself for the translation and careful review
bofenghuang/mt-bench-french
[ "task_categories:question-answering", "task_categories:conversational", "size_categories:n<1K", "language:fr", "license:apache-2.0", "evaluation", "arxiv:2306.05685", "region:us" ]
2023-12-11T13:01:43+00:00
{"language": ["fr"], "license": "apache-2.0", "size_categories": ["n<1K"], "task_categories": ["question-answering", "conversational"], "pretty_name": "MT-Bench-French", "tags": ["evaluation"], "configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "question.jsonl"}]}]}
2024-01-26T10:14:19+00:00
[ "2306.05685" ]
[ "fr" ]
TAGS #task_categories-question-answering #task_categories-conversational #size_categories-n<1K #language-French #license-apache-2.0 #evaluation #arxiv-2306.05685 #region-us
# MT-Bench-French This is a French version of MT-Bench, created to evaluate the multi-turn conversation and instruction-following capabilities of LLMs. Similar to its original version, MT-Bench-French comprises 80 high-quality, multi-turn questions spanning eight main categories. All questions have undergone translation into French and thorough human review to guarantee the use of suitable and authentic wording, meaningful content for assessing LLMs' capabilities in the French language, and coherence between questions within the same conversation. For certain challenging tasks (e.g., math, reasoning, and coding), a reference answer is included in the judge prompt to assist in evaluating responses from LLMs, referred to as a *reference-guided judge*. Notably, these reference answers are also generated by the LLM judge (GPT-4). In our version, we took an extra step of reviewing and correcting these reference answers by human. This was done to address several concerns: 1) GPT-4 exhibited a decline in performance when transitioning from English to French. The responses generated for complex tasks did not meet the required standards to function as reference answers. 2) Human-corrected reference answer helps mitigate the bias in evaluating LLMs. However, it's important to note that some degree of bias still persists. *Please note that although this dataset provides a convenient way to evaluate LLMs, it shouldn't be regarded as the ultimate benchmark for such assessments, given the inherent limitations of both the dataset and the methodology.* ## News - [2024/01/26]: Added 'mistral-small' with thanks to @thomlevy ## Examples Here are a few examples to highlight the distinction: #### Choosing appropriate and authentic wording *Original question:* *Translated question:* Certain translators translate "profit/revenue ratio" as "rapport bénéfice/revenu", but the accurate translation should be "rapport bénéfice/chiffre d'affaires". #### Following original question format *Original question:* *Translated question:* We maintain the original question's format, highlighting "MUST" in uppercase ("IMPÉRATIVEMENT" in French) to grab the attention of the language model. Additionally, we uphold other formats, including indentation and line breaks, in the translated version. #### Avoiding unnecessary translation of Anglicisms *Original question:* *Translated question:* Some English terms were kept as-is, as they are commonly used in French. #### Mixing formal and informal pronouns for diversity *Translated question 1:* *Translated question 2:* *Translated question 3:* #### Ensuring meaningfulness in the translated questions *Original question:* *Translated question:* Some translators might rectify grammatical errors in the sentence. In contrast, we translated it and purposely introduced certain common errors in French. #### Guaranteeing the translated questions are suitable for evaluating LLMs in French *Original question:* *Translated question:* Given that we are evaluating LLMs for the French language, we request the model to translate a sentence into French instead of English. #### Miscellaneous *Original question:* *Translated question:* We replaced the destination 'Hawaii' with 'Corse' since it is more aligned with French culture, along with other changes like substituting "dollar" with "euro". ## How to evaluate custom models Please refer to the instructions of LMSYS for guidance on evaluating custom models. ## Limitations This dataset serves the purpose of efficiently evaluating the performance of LLMs in the French language. However, it's important to acknowledge its limitations, which include: - GPT-4's inherent bias in assessing LLM responses, particularly in aspects like position, verbosity, and self-enhancement. Additionally, it exhibits limitations in effectively grading challenging tasks, such as those requiring advanced mathematical and reasoning capabilities. - The dataset encompasses only 10 tasks (20 questions) per category, which might not provide a comprehensive representation of the full capabilities of LLMs. - A majority of the questions were translated from their original English version, potentially impacting the dataset's ability to accurately reflect performance in the context of French culture. ## Acknowledgment - LMSYS for creating the original dataset - Audrey Cornu, Tiphaine Fievet, Amira Guesmi, Cindy Perrigault, Hayoung Seo (in alphabetical order), and myself for the translation and careful review
[ "# MT-Bench-French\n\nThis is a French version of MT-Bench, created to evaluate the multi-turn conversation and instruction-following capabilities of LLMs. Similar to its original version, MT-Bench-French comprises 80 high-quality, multi-turn questions spanning eight main categories.\n\nAll questions have undergone translation into French and thorough human review to guarantee the use of suitable and authentic wording, meaningful content for assessing LLMs' capabilities in the French language, and coherence between questions within the same conversation.\n\nFor certain challenging tasks (e.g., math, reasoning, and coding), a reference answer is included in the judge prompt to assist in evaluating responses from LLMs, referred to as a *reference-guided judge*. Notably, these reference answers are also generated by the LLM judge (GPT-4). In our version, we took an extra step of reviewing and correcting these reference answers by human. This was done to address several concerns: 1) GPT-4 exhibited a decline in performance when transitioning from English to French. The responses generated for complex tasks did not meet the required standards to function as reference answers. 2) Human-corrected reference answer helps mitigate the bias in evaluating LLMs. However, it's important to note that some degree of bias still persists.\n\n*Please note that although this dataset provides a convenient way to evaluate LLMs, it shouldn't be regarded as the ultimate benchmark for such assessments, given the inherent limitations of both the dataset and the methodology.*", "## News\n\n- [2024/01/26]: Added 'mistral-small' with thanks to @thomlevy", "## Examples\n\nHere are a few examples to highlight the distinction:", "#### Choosing appropriate and authentic wording\n\n*Original question:*\n\n\n\n*Translated question:*\n\n\n\nCertain translators translate \"profit/revenue ratio\" as \"rapport bénéfice/revenu\", but the accurate translation should be \"rapport bénéfice/chiffre d'affaires\".", "#### Following original question format\n\n*Original question:*\n\n\n\n*Translated question:*\n\n\n\nWe maintain the original question's format, highlighting \"MUST\" in uppercase (\"IMPÉRATIVEMENT\" in French) to grab the attention of the language model. Additionally, we uphold other formats, including indentation and line breaks, in the translated version.", "#### Avoiding unnecessary translation of Anglicisms\n\n*Original question:*\n\n\n\n*Translated question:*\n\n\n\nSome English terms were kept as-is, as they are commonly used in French.", "#### Mixing formal and informal pronouns for diversity\n\n*Translated question 1:*\n\n\n\n*Translated question 2:*\n\n\n\n*Translated question 3:*", "#### Ensuring meaningfulness in the translated questions\n\n*Original question:*\n\n\n\n*Translated question:*\n\n\n\nSome translators might rectify grammatical errors in the sentence. In contrast, we translated it and purposely introduced certain common errors in French.", "#### Guaranteeing the translated questions are suitable for evaluating LLMs in French\n\n*Original question:*\n\n\n\n*Translated question:*\n\n\n\nGiven that we are evaluating LLMs for the French language, we request the model to translate a sentence into French instead of English.", "#### Miscellaneous\n\n*Original question:*\n\n\n\n*Translated question:*\n\n\n\nWe replaced the destination 'Hawaii' with 'Corse' since it is more aligned with French culture, along with other changes like substituting \"dollar\" with \"euro\".", "## How to evaluate custom models\n\nPlease refer to the instructions of LMSYS for guidance on evaluating custom models.", "## Limitations\n\nThis dataset serves the purpose of efficiently evaluating the performance of LLMs in the French language. However, it's important to acknowledge its limitations, which include:\n\n- GPT-4's inherent bias in assessing LLM responses, particularly in aspects like position, verbosity, and self-enhancement. Additionally, it exhibits limitations in effectively grading challenging tasks, such as those requiring advanced mathematical and reasoning capabilities.\n- The dataset encompasses only 10 tasks (20 questions) per category, which might not provide a comprehensive representation of the full capabilities of LLMs.\n- A majority of the questions were translated from their original English version, potentially impacting the dataset's ability to accurately reflect performance in the context of French culture.", "## Acknowledgment\n\n- LMSYS for creating the original dataset\n- Audrey Cornu, Tiphaine Fievet, Amira Guesmi, Cindy Perrigault, Hayoung Seo (in alphabetical order), and myself for the translation and careful review" ]
[ "TAGS\n#task_categories-question-answering #task_categories-conversational #size_categories-n<1K #language-French #license-apache-2.0 #evaluation #arxiv-2306.05685 #region-us \n", "# MT-Bench-French\n\nThis is a French version of MT-Bench, created to evaluate the multi-turn conversation and instruction-following capabilities of LLMs. Similar to its original version, MT-Bench-French comprises 80 high-quality, multi-turn questions spanning eight main categories.\n\nAll questions have undergone translation into French and thorough human review to guarantee the use of suitable and authentic wording, meaningful content for assessing LLMs' capabilities in the French language, and coherence between questions within the same conversation.\n\nFor certain challenging tasks (e.g., math, reasoning, and coding), a reference answer is included in the judge prompt to assist in evaluating responses from LLMs, referred to as a *reference-guided judge*. Notably, these reference answers are also generated by the LLM judge (GPT-4). In our version, we took an extra step of reviewing and correcting these reference answers by human. This was done to address several concerns: 1) GPT-4 exhibited a decline in performance when transitioning from English to French. The responses generated for complex tasks did not meet the required standards to function as reference answers. 2) Human-corrected reference answer helps mitigate the bias in evaluating LLMs. However, it's important to note that some degree of bias still persists.\n\n*Please note that although this dataset provides a convenient way to evaluate LLMs, it shouldn't be regarded as the ultimate benchmark for such assessments, given the inherent limitations of both the dataset and the methodology.*", "## News\n\n- [2024/01/26]: Added 'mistral-small' with thanks to @thomlevy", "## Examples\n\nHere are a few examples to highlight the distinction:", "#### Choosing appropriate and authentic wording\n\n*Original question:*\n\n\n\n*Translated question:*\n\n\n\nCertain translators translate \"profit/revenue ratio\" as \"rapport bénéfice/revenu\", but the accurate translation should be \"rapport bénéfice/chiffre d'affaires\".", "#### Following original question format\n\n*Original question:*\n\n\n\n*Translated question:*\n\n\n\nWe maintain the original question's format, highlighting \"MUST\" in uppercase (\"IMPÉRATIVEMENT\" in French) to grab the attention of the language model. Additionally, we uphold other formats, including indentation and line breaks, in the translated version.", "#### Avoiding unnecessary translation of Anglicisms\n\n*Original question:*\n\n\n\n*Translated question:*\n\n\n\nSome English terms were kept as-is, as they are commonly used in French.", "#### Mixing formal and informal pronouns for diversity\n\n*Translated question 1:*\n\n\n\n*Translated question 2:*\n\n\n\n*Translated question 3:*", "#### Ensuring meaningfulness in the translated questions\n\n*Original question:*\n\n\n\n*Translated question:*\n\n\n\nSome translators might rectify grammatical errors in the sentence. In contrast, we translated it and purposely introduced certain common errors in French.", "#### Guaranteeing the translated questions are suitable for evaluating LLMs in French\n\n*Original question:*\n\n\n\n*Translated question:*\n\n\n\nGiven that we are evaluating LLMs for the French language, we request the model to translate a sentence into French instead of English.", "#### Miscellaneous\n\n*Original question:*\n\n\n\n*Translated question:*\n\n\n\nWe replaced the destination 'Hawaii' with 'Corse' since it is more aligned with French culture, along with other changes like substituting \"dollar\" with \"euro\".", "## How to evaluate custom models\n\nPlease refer to the instructions of LMSYS for guidance on evaluating custom models.", "## Limitations\n\nThis dataset serves the purpose of efficiently evaluating the performance of LLMs in the French language. However, it's important to acknowledge its limitations, which include:\n\n- GPT-4's inherent bias in assessing LLM responses, particularly in aspects like position, verbosity, and self-enhancement. Additionally, it exhibits limitations in effectively grading challenging tasks, such as those requiring advanced mathematical and reasoning capabilities.\n- The dataset encompasses only 10 tasks (20 questions) per category, which might not provide a comprehensive representation of the full capabilities of LLMs.\n- A majority of the questions were translated from their original English version, potentially impacting the dataset's ability to accurately reflect performance in the context of French culture.", "## Acknowledgment\n\n- LMSYS for creating the original dataset\n- Audrey Cornu, Tiphaine Fievet, Amira Guesmi, Cindy Perrigault, Hayoung Seo (in alphabetical order), and myself for the translation and careful review" ]
[ 63, 366, 25, 15, 62, 84, 43, 34, 63, 63, 58, 24, 183, 60 ]
[ "passage: TAGS\n#task_categories-question-answering #task_categories-conversational #size_categories-n<1K #language-French #license-apache-2.0 #evaluation #arxiv-2306.05685 #region-us \n# MT-Bench-French\n\nThis is a French version of MT-Bench, created to evaluate the multi-turn conversation and instruction-following capabilities of LLMs. Similar to its original version, MT-Bench-French comprises 80 high-quality, multi-turn questions spanning eight main categories.\n\nAll questions have undergone translation into French and thorough human review to guarantee the use of suitable and authentic wording, meaningful content for assessing LLMs' capabilities in the French language, and coherence between questions within the same conversation.\n\nFor certain challenging tasks (e.g., math, reasoning, and coding), a reference answer is included in the judge prompt to assist in evaluating responses from LLMs, referred to as a *reference-guided judge*. Notably, these reference answers are also generated by the LLM judge (GPT-4). In our version, we took an extra step of reviewing and correcting these reference answers by human. This was done to address several concerns: 1) GPT-4 exhibited a decline in performance when transitioning from English to French. The responses generated for complex tasks did not meet the required standards to function as reference answers. 2) Human-corrected reference answer helps mitigate the bias in evaluating LLMs. However, it's important to note that some degree of bias still persists.\n\n*Please note that although this dataset provides a convenient way to evaluate LLMs, it shouldn't be regarded as the ultimate benchmark for such assessments, given the inherent limitations of both the dataset and the methodology.*## News\n\n- [2024/01/26]: Added 'mistral-small' with thanks to @thomlevy## Examples\n\nHere are a few examples to highlight the distinction:", "passage: #### Choosing appropriate and authentic wording\n\n*Original question:*\n\n\n\n*Translated question:*\n\n\n\nCertain translators translate \"profit/revenue ratio\" as \"rapport bénéfice/revenu\", but the accurate translation should be \"rapport bénéfice/chiffre d'affaires\".#### Following original question format\n\n*Original question:*\n\n\n\n*Translated question:*\n\n\n\nWe maintain the original question's format, highlighting \"MUST\" in uppercase (\"IMPÉRATIVEMENT\" in French) to grab the attention of the language model. Additionally, we uphold other formats, including indentation and line breaks, in the translated version.#### Avoiding unnecessary translation of Anglicisms\n\n*Original question:*\n\n\n\n*Translated question:*\n\n\n\nSome English terms were kept as-is, as they are commonly used in French.#### Mixing formal and informal pronouns for diversity\n\n*Translated question 1:*\n\n\n\n*Translated question 2:*\n\n\n\n*Translated question 3:*#### Ensuring meaningfulness in the translated questions\n\n*Original question:*\n\n\n\n*Translated question:*\n\n\n\nSome translators might rectify grammatical errors in the sentence. In contrast, we translated it and purposely introduced certain common errors in French.#### Guaranteeing the translated questions are suitable for evaluating LLMs in French\n\n*Original question:*\n\n\n\n*Translated question:*\n\n\n\nGiven that we are evaluating LLMs for the French language, we request the model to translate a sentence into French instead of English.#### Miscellaneous\n\n*Original question:*\n\n\n\n*Translated question:*\n\n\n\nWe replaced the destination 'Hawaii' with 'Corse' since it is more aligned with French culture, along with other changes like substituting \"dollar\" with \"euro\".## How to evaluate custom models\n\nPlease refer to the instructions of LMSYS for guidance on evaluating custom models." ]
77251b39a4d5b7e0027a2c70921b9fef30d20947
# Dataset Card for Evaluation run of Deci/DeciLM-7B ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/Deci/DeciLM-7B - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** [email protected] ### Dataset Summary Dataset automatically created during the evaluation run of model [Deci/DeciLM-7B](https://huggingface.co/Deci/DeciLM-7B) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_Deci/DeciLM-7B", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-11T13:05:55.242370](https://huggingface.co/datasets/open-llm-leaderboard/details_Deci__DeciLM-7B/blob/main/results_2023-12-11T13-05-55.242370.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.5986461662246719, "acc_stderr": 0.03322810922254394, "acc_norm": 0.6014214623320648, "acc_norm_stderr": 0.03391006890945986, "mc1": 0.2692778457772338, "mc1_stderr": 0.015528566637087295, "mc2": 0.4032625331106103, "mc2_stderr": 0.01398363920569579 }, "harness|arc:challenge|25": { "acc": 0.552901023890785, "acc_stderr": 0.014529380160526843, "acc_norm": 0.5938566552901023, "acc_norm_stderr": 0.014351656690097862 }, "harness|hellaswag|10": { "acc": 0.6262696673969329, "acc_stderr": 0.004828045774734898, "acc_norm": 0.8251344353714399, "acc_norm_stderr": 0.0037907576465758953 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.24, "acc_stderr": 0.04292346959909283, "acc_norm": 0.24, "acc_norm_stderr": 0.04292346959909283 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.5407407407407407, "acc_stderr": 0.04304979692464242, "acc_norm": 0.5407407407407407, "acc_norm_stderr": 0.04304979692464242 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.6644736842105263, "acc_stderr": 0.03842498559395268, "acc_norm": 0.6644736842105263, "acc_norm_stderr": 0.03842498559395268 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.56, "acc_stderr": 0.04988876515698589, "acc_norm": 0.56, "acc_norm_stderr": 0.04988876515698589 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.6792452830188679, "acc_stderr": 0.028727502957880267, "acc_norm": 0.6792452830188679, "acc_norm_stderr": 0.028727502957880267 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.6944444444444444, "acc_stderr": 0.03852084696008534, "acc_norm": 0.6944444444444444, "acc_norm_stderr": 0.03852084696008534 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.42, "acc_stderr": 0.049604496374885836, "acc_norm": 0.42, "acc_norm_stderr": 0.049604496374885836 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.42, "acc_stderr": 0.049604496374885836, "acc_norm": 0.42, "acc_norm_stderr": 0.049604496374885836 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.35, "acc_stderr": 0.0479372485441102, "acc_norm": 0.35, "acc_norm_stderr": 0.0479372485441102 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.630057803468208, "acc_stderr": 0.0368122963339432, "acc_norm": 0.630057803468208, "acc_norm_stderr": 0.0368122963339432 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.43137254901960786, "acc_stderr": 0.04928099597287533, "acc_norm": 0.43137254901960786, "acc_norm_stderr": 0.04928099597287533 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.72, "acc_stderr": 0.045126085985421276, "acc_norm": 0.72, "acc_norm_stderr": 0.045126085985421276 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.5106382978723404, "acc_stderr": 0.03267862331014063, "acc_norm": 0.5106382978723404, "acc_norm_stderr": 0.03267862331014063 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.40350877192982454, "acc_stderr": 0.046151869625837026, "acc_norm": 0.40350877192982454, "acc_norm_stderr": 0.046151869625837026 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.5448275862068965, "acc_stderr": 0.04149886942192117, "acc_norm": 0.5448275862068965, "acc_norm_stderr": 0.04149886942192117 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.38095238095238093, "acc_stderr": 0.025010749116137595, "acc_norm": 0.38095238095238093, "acc_norm_stderr": 0.025010749116137595 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.3412698412698413, "acc_stderr": 0.04240799327574924, "acc_norm": 0.3412698412698413, "acc_norm_stderr": 0.04240799327574924 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.32, "acc_stderr": 0.046882617226215034, "acc_norm": 0.32, "acc_norm_stderr": 0.046882617226215034 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.7225806451612903, "acc_stderr": 0.025470196835900055, "acc_norm": 0.7225806451612903, "acc_norm_stderr": 0.025470196835900055 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.46798029556650245, "acc_stderr": 0.035107665979592154, "acc_norm": 0.46798029556650245, "acc_norm_stderr": 0.035107665979592154 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.62, "acc_stderr": 0.048783173121456316, "acc_norm": 0.62, "acc_norm_stderr": 0.048783173121456316 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.7272727272727273, "acc_stderr": 0.0347769116216366, "acc_norm": 0.7272727272727273, "acc_norm_stderr": 0.0347769116216366 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.7474747474747475, "acc_stderr": 0.030954055470365897, "acc_norm": 0.7474747474747475, "acc_norm_stderr": 0.030954055470365897 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.8393782383419689, "acc_stderr": 0.02649905770139746, "acc_norm": 0.8393782383419689, "acc_norm_stderr": 0.02649905770139746 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.5794871794871795, "acc_stderr": 0.025028610276710855, "acc_norm": 0.5794871794871795, "acc_norm_stderr": 0.025028610276710855 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.3888888888888889, "acc_stderr": 0.029723278961476668, "acc_norm": 0.3888888888888889, "acc_norm_stderr": 0.029723278961476668 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.6260504201680672, "acc_stderr": 0.03142946637883708, "acc_norm": 0.6260504201680672, "acc_norm_stderr": 0.03142946637883708 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.3841059602649007, "acc_stderr": 0.03971301814719197, "acc_norm": 0.3841059602649007, "acc_norm_stderr": 0.03971301814719197 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.7871559633027523, "acc_stderr": 0.017549376389313694, "acc_norm": 0.7871559633027523, "acc_norm_stderr": 0.017549376389313694 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.47685185185185186, "acc_stderr": 0.03406315360711507, "acc_norm": 0.47685185185185186, "acc_norm_stderr": 0.03406315360711507 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.7598039215686274, "acc_stderr": 0.02998373305591362, "acc_norm": 0.7598039215686274, "acc_norm_stderr": 0.02998373305591362 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.7805907172995781, "acc_stderr": 0.026939106581553945, "acc_norm": 0.7805907172995781, "acc_norm_stderr": 0.026939106581553945 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.6681614349775785, "acc_stderr": 0.03160295143776679, "acc_norm": 0.6681614349775785, "acc_norm_stderr": 0.03160295143776679 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.6946564885496184, "acc_stderr": 0.040393149787245605, "acc_norm": 0.6946564885496184, "acc_norm_stderr": 0.040393149787245605 }, "harness|hendrycksTest-international_law|5": { "acc": 0.768595041322314, "acc_stderr": 0.03849856098794088, "acc_norm": 0.768595041322314, "acc_norm_stderr": 0.03849856098794088 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.7129629629629629, "acc_stderr": 0.043733130409147614, "acc_norm": 0.7129629629629629, "acc_norm_stderr": 0.043733130409147614 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.6871165644171779, "acc_stderr": 0.036429145782924055, "acc_norm": 0.6871165644171779, "acc_norm_stderr": 0.036429145782924055 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.4642857142857143, "acc_stderr": 0.04733667890053756, "acc_norm": 0.4642857142857143, "acc_norm_stderr": 0.04733667890053756 }, "harness|hendrycksTest-management|5": { "acc": 0.7475728155339806, "acc_stderr": 0.04301250399690878, "acc_norm": 0.7475728155339806, "acc_norm_stderr": 0.04301250399690878 }, "harness|hendrycksTest-marketing|5": { "acc": 0.8461538461538461, "acc_stderr": 0.02363687331748927, "acc_norm": 0.8461538461538461, "acc_norm_stderr": 0.02363687331748927 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.63, "acc_stderr": 0.04852365870939099, "acc_norm": 0.63, "acc_norm_stderr": 0.04852365870939099 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.7879948914431673, "acc_stderr": 0.01461609938583368, "acc_norm": 0.7879948914431673, "acc_norm_stderr": 0.01461609938583368 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.6705202312138728, "acc_stderr": 0.025305258131879695, "acc_norm": 0.6705202312138728, "acc_norm_stderr": 0.025305258131879695 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.2424581005586592, "acc_stderr": 0.01433352205921789, "acc_norm": 0.2424581005586592, "acc_norm_stderr": 0.01433352205921789 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.696078431372549, "acc_stderr": 0.02633661346904663, "acc_norm": 0.696078431372549, "acc_norm_stderr": 0.02633661346904663 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.6688102893890675, "acc_stderr": 0.02673062072800491, "acc_norm": 0.6688102893890675, "acc_norm_stderr": 0.02673062072800491 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.6728395061728395, "acc_stderr": 0.026105673861409825, "acc_norm": 0.6728395061728395, "acc_norm_stderr": 0.026105673861409825 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.4645390070921986, "acc_stderr": 0.029752389657427047, "acc_norm": 0.4645390070921986, "acc_norm_stderr": 0.029752389657427047 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.4276401564537158, "acc_stderr": 0.012635799922765846, "acc_norm": 0.4276401564537158, "acc_norm_stderr": 0.012635799922765846 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.6139705882352942, "acc_stderr": 0.029573269134411124, "acc_norm": 0.6139705882352942, "acc_norm_stderr": 0.029573269134411124 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.5915032679738562, "acc_stderr": 0.01988622103750187, "acc_norm": 0.5915032679738562, "acc_norm_stderr": 0.01988622103750187 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.6454545454545455, "acc_stderr": 0.045820048415054174, "acc_norm": 0.6454545454545455, "acc_norm_stderr": 0.045820048415054174 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.7183673469387755, "acc_stderr": 0.02879518557429129, "acc_norm": 0.7183673469387755, "acc_norm_stderr": 0.02879518557429129 }, "harness|hendrycksTest-sociology|5": { "acc": 0.8208955223880597, "acc_stderr": 0.027113286753111837, "acc_norm": 0.8208955223880597, "acc_norm_stderr": 0.027113286753111837 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.77, "acc_stderr": 0.04229525846816506, "acc_norm": 0.77, "acc_norm_stderr": 0.04229525846816506 }, "harness|hendrycksTest-virology|5": { "acc": 0.4939759036144578, "acc_stderr": 0.03892212195333045, "acc_norm": 0.4939759036144578, "acc_norm_stderr": 0.03892212195333045 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.8011695906432749, "acc_stderr": 0.030611116557432528, "acc_norm": 0.8011695906432749, "acc_norm_stderr": 0.030611116557432528 }, "harness|truthfulqa:mc|0": { "mc1": 0.2692778457772338, "mc1_stderr": 0.015528566637087295, "mc2": 0.4032625331106103, "mc2_stderr": 0.01398363920569579 }, "harness|winogrande|5": { "acc": 0.7995264404104183, "acc_stderr": 0.011251958281205083 }, "harness|gsm8k|5": { "acc": 0.47384382107657314, "acc_stderr": 0.013753627037255045 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
open-llm-leaderboard/details_Deci__DeciLM-7B
[ "region:us" ]
2023-12-11T13:08:44+00:00
{"pretty_name": "Evaluation run of Deci/DeciLM-7B", "dataset_summary": "Dataset automatically created during the evaluation run of model [Deci/DeciLM-7B](https://huggingface.co/Deci/DeciLM-7B) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_Deci-early-access__DeciLM-7B-early_private\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-11T13:05:55.242370](https://huggingface.co/datasets/open-llm-leaderboard/details_Deci-early-access__DeciLM-7B-early_private/blob/main/results_2023-12-11T13-05-55.242370.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.5986461662246719,\n \"acc_stderr\": 0.03322810922254394,\n \"acc_norm\": 0.6014214623320648,\n \"acc_norm_stderr\": 0.03391006890945986,\n \"mc1\": 0.2692778457772338,\n \"mc1_stderr\": 0.015528566637087295,\n \"mc2\": 0.4032625331106103,\n \"mc2_stderr\": 0.01398363920569579\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.552901023890785,\n \"acc_stderr\": 0.014529380160526843,\n \"acc_norm\": 0.5938566552901023,\n \"acc_norm_stderr\": 0.014351656690097862\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6262696673969329,\n \"acc_stderr\": 0.004828045774734898,\n \"acc_norm\": 0.8251344353714399,\n \"acc_norm_stderr\": 0.0037907576465758953\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.24,\n \"acc_stderr\": 0.04292346959909283,\n \"acc_norm\": 0.24,\n \"acc_norm_stderr\": 0.04292346959909283\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.5407407407407407,\n \"acc_stderr\": 0.04304979692464242,\n \"acc_norm\": 0.5407407407407407,\n \"acc_norm_stderr\": 0.04304979692464242\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.6644736842105263,\n \"acc_stderr\": 0.03842498559395268,\n \"acc_norm\": 0.6644736842105263,\n \"acc_norm_stderr\": 0.03842498559395268\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.56,\n \"acc_stderr\": 0.04988876515698589,\n \"acc_norm\": 0.56,\n \"acc_norm_stderr\": 0.04988876515698589\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.6792452830188679,\n \"acc_stderr\": 0.028727502957880267,\n \"acc_norm\": 0.6792452830188679,\n \"acc_norm_stderr\": 0.028727502957880267\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.6944444444444444,\n \"acc_stderr\": 0.03852084696008534,\n \"acc_norm\": 0.6944444444444444,\n \"acc_norm_stderr\": 0.03852084696008534\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.42,\n \"acc_stderr\": 0.049604496374885836,\n \"acc_norm\": 0.42,\n \"acc_norm_stderr\": 0.049604496374885836\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.42,\n \"acc_stderr\": 0.049604496374885836,\n \"acc_norm\": 0.42,\n \"acc_norm_stderr\": 0.049604496374885836\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.35,\n \"acc_stderr\": 0.0479372485441102,\n \"acc_norm\": 0.35,\n \"acc_norm_stderr\": 0.0479372485441102\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.630057803468208,\n \"acc_stderr\": 0.0368122963339432,\n \"acc_norm\": 0.630057803468208,\n \"acc_norm_stderr\": 0.0368122963339432\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.43137254901960786,\n \"acc_stderr\": 0.04928099597287533,\n \"acc_norm\": 0.43137254901960786,\n \"acc_norm_stderr\": 0.04928099597287533\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.72,\n \"acc_stderr\": 0.045126085985421276,\n \"acc_norm\": 0.72,\n \"acc_norm_stderr\": 0.045126085985421276\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.5106382978723404,\n \"acc_stderr\": 0.03267862331014063,\n \"acc_norm\": 0.5106382978723404,\n \"acc_norm_stderr\": 0.03267862331014063\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.40350877192982454,\n \"acc_stderr\": 0.046151869625837026,\n \"acc_norm\": 0.40350877192982454,\n \"acc_norm_stderr\": 0.046151869625837026\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.5448275862068965,\n \"acc_stderr\": 0.04149886942192117,\n \"acc_norm\": 0.5448275862068965,\n \"acc_norm_stderr\": 0.04149886942192117\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.38095238095238093,\n \"acc_stderr\": 0.025010749116137595,\n \"acc_norm\": 0.38095238095238093,\n \"acc_norm_stderr\": 0.025010749116137595\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.3412698412698413,\n \"acc_stderr\": 0.04240799327574924,\n \"acc_norm\": 0.3412698412698413,\n \"acc_norm_stderr\": 0.04240799327574924\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.32,\n \"acc_stderr\": 0.046882617226215034,\n \"acc_norm\": 0.32,\n \"acc_norm_stderr\": 0.046882617226215034\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.7225806451612903,\n \"acc_stderr\": 0.025470196835900055,\n \"acc_norm\": 0.7225806451612903,\n \"acc_norm_stderr\": 0.025470196835900055\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.46798029556650245,\n \"acc_stderr\": 0.035107665979592154,\n \"acc_norm\": 0.46798029556650245,\n \"acc_norm_stderr\": 0.035107665979592154\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.62,\n \"acc_stderr\": 0.048783173121456316,\n \"acc_norm\": 0.62,\n \"acc_norm_stderr\": 0.048783173121456316\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.7272727272727273,\n \"acc_stderr\": 0.0347769116216366,\n \"acc_norm\": 0.7272727272727273,\n \"acc_norm_stderr\": 0.0347769116216366\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.7474747474747475,\n \"acc_stderr\": 0.030954055470365897,\n \"acc_norm\": 0.7474747474747475,\n \"acc_norm_stderr\": 0.030954055470365897\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.8393782383419689,\n \"acc_stderr\": 0.02649905770139746,\n \"acc_norm\": 0.8393782383419689,\n \"acc_norm_stderr\": 0.02649905770139746\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.5794871794871795,\n \"acc_stderr\": 0.025028610276710855,\n \"acc_norm\": 0.5794871794871795,\n \"acc_norm_stderr\": 0.025028610276710855\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.3888888888888889,\n \"acc_stderr\": 0.029723278961476668,\n \"acc_norm\": 0.3888888888888889,\n \"acc_norm_stderr\": 0.029723278961476668\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.6260504201680672,\n \"acc_stderr\": 0.03142946637883708,\n \"acc_norm\": 0.6260504201680672,\n \"acc_norm_stderr\": 0.03142946637883708\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.3841059602649007,\n \"acc_stderr\": 0.03971301814719197,\n \"acc_norm\": 0.3841059602649007,\n \"acc_norm_stderr\": 0.03971301814719197\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.7871559633027523,\n \"acc_stderr\": 0.017549376389313694,\n \"acc_norm\": 0.7871559633027523,\n \"acc_norm_stderr\": 0.017549376389313694\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.47685185185185186,\n \"acc_stderr\": 0.03406315360711507,\n \"acc_norm\": 0.47685185185185186,\n \"acc_norm_stderr\": 0.03406315360711507\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.7598039215686274,\n \"acc_stderr\": 0.02998373305591362,\n \"acc_norm\": 0.7598039215686274,\n \"acc_norm_stderr\": 0.02998373305591362\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.7805907172995781,\n \"acc_stderr\": 0.026939106581553945,\n \"acc_norm\": 0.7805907172995781,\n \"acc_norm_stderr\": 0.026939106581553945\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.6681614349775785,\n \"acc_stderr\": 0.03160295143776679,\n \"acc_norm\": 0.6681614349775785,\n \"acc_norm_stderr\": 0.03160295143776679\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.6946564885496184,\n \"acc_stderr\": 0.040393149787245605,\n \"acc_norm\": 0.6946564885496184,\n \"acc_norm_stderr\": 0.040393149787245605\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.768595041322314,\n \"acc_stderr\": 0.03849856098794088,\n \"acc_norm\": 0.768595041322314,\n \"acc_norm_stderr\": 0.03849856098794088\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.7129629629629629,\n \"acc_stderr\": 0.043733130409147614,\n \"acc_norm\": 0.7129629629629629,\n \"acc_norm_stderr\": 0.043733130409147614\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.6871165644171779,\n \"acc_stderr\": 0.036429145782924055,\n \"acc_norm\": 0.6871165644171779,\n \"acc_norm_stderr\": 0.036429145782924055\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.4642857142857143,\n \"acc_stderr\": 0.04733667890053756,\n \"acc_norm\": 0.4642857142857143,\n \"acc_norm_stderr\": 0.04733667890053756\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.7475728155339806,\n \"acc_stderr\": 0.04301250399690878,\n \"acc_norm\": 0.7475728155339806,\n \"acc_norm_stderr\": 0.04301250399690878\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.8461538461538461,\n \"acc_stderr\": 0.02363687331748927,\n \"acc_norm\": 0.8461538461538461,\n \"acc_norm_stderr\": 0.02363687331748927\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.63,\n \"acc_stderr\": 0.04852365870939099,\n \"acc_norm\": 0.63,\n \"acc_norm_stderr\": 0.04852365870939099\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.7879948914431673,\n \"acc_stderr\": 0.01461609938583368,\n \"acc_norm\": 0.7879948914431673,\n \"acc_norm_stderr\": 0.01461609938583368\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.6705202312138728,\n \"acc_stderr\": 0.025305258131879695,\n \"acc_norm\": 0.6705202312138728,\n \"acc_norm_stderr\": 0.025305258131879695\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.2424581005586592,\n \"acc_stderr\": 0.01433352205921789,\n \"acc_norm\": 0.2424581005586592,\n \"acc_norm_stderr\": 0.01433352205921789\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.696078431372549,\n \"acc_stderr\": 0.02633661346904663,\n \"acc_norm\": 0.696078431372549,\n \"acc_norm_stderr\": 0.02633661346904663\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.6688102893890675,\n \"acc_stderr\": 0.02673062072800491,\n \"acc_norm\": 0.6688102893890675,\n \"acc_norm_stderr\": 0.02673062072800491\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.6728395061728395,\n \"acc_stderr\": 0.026105673861409825,\n \"acc_norm\": 0.6728395061728395,\n \"acc_norm_stderr\": 0.026105673861409825\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.4645390070921986,\n \"acc_stderr\": 0.029752389657427047,\n \"acc_norm\": 0.4645390070921986,\n \"acc_norm_stderr\": 0.029752389657427047\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.4276401564537158,\n \"acc_stderr\": 0.012635799922765846,\n \"acc_norm\": 0.4276401564537158,\n \"acc_norm_stderr\": 0.012635799922765846\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.6139705882352942,\n \"acc_stderr\": 0.029573269134411124,\n \"acc_norm\": 0.6139705882352942,\n \"acc_norm_stderr\": 0.029573269134411124\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.5915032679738562,\n \"acc_stderr\": 0.01988622103750187,\n \"acc_norm\": 0.5915032679738562,\n \"acc_norm_stderr\": 0.01988622103750187\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.6454545454545455,\n \"acc_stderr\": 0.045820048415054174,\n \"acc_norm\": 0.6454545454545455,\n \"acc_norm_stderr\": 0.045820048415054174\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.7183673469387755,\n \"acc_stderr\": 0.02879518557429129,\n \"acc_norm\": 0.7183673469387755,\n \"acc_norm_stderr\": 0.02879518557429129\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.8208955223880597,\n \"acc_stderr\": 0.027113286753111837,\n \"acc_norm\": 0.8208955223880597,\n \"acc_norm_stderr\": 0.027113286753111837\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.77,\n \"acc_stderr\": 0.04229525846816506,\n \"acc_norm\": 0.77,\n \"acc_norm_stderr\": 0.04229525846816506\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.4939759036144578,\n \"acc_stderr\": 0.03892212195333045,\n \"acc_norm\": 0.4939759036144578,\n \"acc_norm_stderr\": 0.03892212195333045\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.8011695906432749,\n \"acc_stderr\": 0.030611116557432528,\n \"acc_norm\": 0.8011695906432749,\n \"acc_norm_stderr\": 0.030611116557432528\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.2692778457772338,\n \"mc1_stderr\": 0.015528566637087295,\n \"mc2\": 0.4032625331106103,\n \"mc2_stderr\": 0.01398363920569579\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7995264404104183,\n \"acc_stderr\": 0.011251958281205083\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.47384382107657314,\n \"acc_stderr\": 0.013753627037255045\n }\n}\n```", "repo_url": "https://huggingface.co/Deci/DeciLM-7B", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|arc:challenge|25_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|gsm8k|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hellaswag|10_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-11T13-05-55.242370.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["**/details_harness|winogrande|5_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-11T13-05-55.242370.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_11T13_05_55.242370", "path": ["results_2023-12-11T13-05-55.242370.parquet"]}, {"split": "latest", "path": ["results_2023-12-11T13-05-55.242370.parquet"]}]}]}
2023-12-12T13:55:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of Deci/DeciLM-7B ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model Deci/DeciLM-7B on the Open LLM Leaderboard. The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-11T13:05:55.242370(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Evaluation run of Deci/DeciLM-7B", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model Deci/DeciLM-7B on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-11T13:05:55.242370(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of Deci/DeciLM-7B", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model Deci/DeciLM-7B on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-11T13:05:55.242370(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 16, 31, 165, 67, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of Deci/DeciLM-7B## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model Deci/DeciLM-7B on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-11T13:05:55.242370(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
061f89a5bc4c64870aaa1c268dd854b19fb1afc1
# UltraFeedback - Multi-Binarized using the Average of Preference Ratings (Cleaned) This dataset represents a new iteration on top of [`argilla/ultrafeedback-binarized-preferences-cleaned`](https://huggingface.co/datasets/argilla/ultrafeedback-binarized-preferences-cleaned), and has been created to explore whether DPO fine-tuning with more than one rejection per chosen response helps the model perform better in the AlpacaEval, MT-Bench, and LM Eval Harness benchmarks. Read more about Argilla's approach towards UltraFeedback binarization at [`argilla/ultrafeedback-binarized-preferences/README.md`](https://huggingface.co/datasets/argilla/ultrafeedback-binarized-preferences/blob/main/README.md), and about the parent approach of this one at [`argilla/ultrafeedback-binarized-preferences-cleaned/README.md`](https://huggingface.co/datasets/argilla/ultrafeedback-binarized-preferences-cleaned/blob/main/README.md), ## Differences with `argilla/ultrafeedback-binarized-preferences` Thanks to the recent issue identified by [AllenAI](https://huggingface.co/allenai) related to the TruthfulQA contamination within the original UltraFeedback dataset due to some prompts being reused from the TruthfulQA dataset (used for benchmarking in the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) from HuggingFace H4), we also decided to follow AllenAI's advice and remove those from the UltraFeedback dataset that we binarized using a completely different approach, which implied using the average of the preference ratings rather than the critique overall score, as [`HuggingFaceH4/ultrafeedback_binarized`](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized) did. Besides that, we also saw that not only the rows with the `source=truthful_qa` were contamined (for obvious reasons), but also some coming from ShareGPT, so we also removed those doing a left join with both subsets from the [`truthful_qa`](https://huggingface.co/datasets/truthful_qa) dataset. Finally, we also modified the formatting to be aligned with both [`HuggingFaceH4/ultrafeedback_binarized`](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized), and [`allenai/ultrafeedback_binarized_cleaned`](https://huggingface.co/datasets/allenai/ultrafeedback_binarized_cleaned) in order to ease the integration within the [`huggingface/alignment-handbook`](https://github.com/huggingface/alignment-handbook) so that the formatting is standardized. ## Differences with `argilla/ultrafeedback-binarized-preferences-cleaned` We kept the same pre-processing steps for cleaning the [`openbmb/UltraFeedback`](https://huggingface.co/datasets/openbmb/UltraFeedback) as well as the same preference rating calculation towards deciding whether to select a response as chosen or rejected, the difference is that this one is using a multi-binarization approach, where each chosen response has its own row with one different row depending on the number of rejected responses, meaning that for the same prompt we will have the same chosen response and different rejected responses. ## Reproduce <a target="_blank" href="https://colab.research.google.com/drive/1CTvQq_HmwuUPTuAboGLFDtqcel4xef-g?usp=sharing"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a> To reproduce the data processing combining both our approach and the suggestions from HuggingFace H4 w.r.t. the formatting and the ones from AllenAI to remove the TruthfulQA contamination, feel free to run the attached Colab Notebook or just view it at [`notebook.ipynb`](./notebook.ipynb) within this repository. From Argilla we encourage anyone out there to play around, investigate, and experiment with the data, and we firmly believe on open sourcing what we do, as ourselves, as well as the whole community, benefit a lot from open source and we also want to give back. ## Citation If you find this dataset is useful in your work, please cite the original UltraFeedback dataset: https://huggingface.co/datasets/openbmb/UltraFeedback Additionally, you may also want to cite our work with Notus 7B, which lead the curation of the UltraFeedback dataset: ```bibtex @misc{notus2023, author = {Alvaro Bartolome and Gabriel Martin and Daniel Vila}, title = {Notus}, year = {2023}, publisher = {GitHub}, journal = {GitHub Repository}, howpublished = {\url{https://github.com/argilla-io/notus}} } ``` > Alphabetically ordered by last name due to equal contribution.
argilla/ultrafeedback-multi-binarized-preferences-cleaned
[ "task_categories:text-generation", "size_categories:100K<n<1M", "language:en", "license:mit", "dpo", "preference", "ultrafeedback", "region:us" ]
2023-12-11T14:04:24+00:00
{"language": ["en"], "license": "mit", "size_categories": ["100K<n<1M"], "task_categories": ["text-generation"], "pretty_name": "UltraFeedback Multi-Binarized Preferences Cleaned", "dataset_info": {"features": [{"name": "source", "dtype": "string"}, {"name": "prompt", "dtype": "string"}, {"name": "chosen", "list": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}, {"name": "chosen-rating", "dtype": "float64"}, {"name": "chosen-model", "dtype": "string"}, {"name": "rejected", "list": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}, {"name": "rejected-rating", "dtype": "float64"}, {"name": "rejected-model", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 738122612, "num_examples": 157675}], "download_size": 196872615, "dataset_size": 738122612}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "tags": ["dpo", "preference", "ultrafeedback"]}
2023-12-11T14:21:14+00:00
[]
[ "en" ]
TAGS #task_categories-text-generation #size_categories-100K<n<1M #language-English #license-mit #dpo #preference #ultrafeedback #region-us
# UltraFeedback - Multi-Binarized using the Average of Preference Ratings (Cleaned) This dataset represents a new iteration on top of 'argilla/ultrafeedback-binarized-preferences-cleaned', and has been created to explore whether DPO fine-tuning with more than one rejection per chosen response helps the model perform better in the AlpacaEval, MT-Bench, and LM Eval Harness benchmarks. Read more about Argilla's approach towards UltraFeedback binarization at 'argilla/ultrafeedback-binarized-preferences/URL', and about the parent approach of this one at 'argilla/ultrafeedback-binarized-preferences-cleaned/URL', ## Differences with 'argilla/ultrafeedback-binarized-preferences' Thanks to the recent issue identified by AllenAI related to the TruthfulQA contamination within the original UltraFeedback dataset due to some prompts being reused from the TruthfulQA dataset (used for benchmarking in the Open LLM Leaderboard from HuggingFace H4), we also decided to follow AllenAI's advice and remove those from the UltraFeedback dataset that we binarized using a completely different approach, which implied using the average of the preference ratings rather than the critique overall score, as 'HuggingFaceH4/ultrafeedback_binarized' did. Besides that, we also saw that not only the rows with the 'source=truthful_qa' were contamined (for obvious reasons), but also some coming from ShareGPT, so we also removed those doing a left join with both subsets from the 'truthful_qa' dataset. Finally, we also modified the formatting to be aligned with both 'HuggingFaceH4/ultrafeedback_binarized', and 'allenai/ultrafeedback_binarized_cleaned' in order to ease the integration within the 'huggingface/alignment-handbook' so that the formatting is standardized. ## Differences with 'argilla/ultrafeedback-binarized-preferences-cleaned' We kept the same pre-processing steps for cleaning the 'openbmb/UltraFeedback' as well as the same preference rating calculation towards deciding whether to select a response as chosen or rejected, the difference is that this one is using a multi-binarization approach, where each chosen response has its own row with one different row depending on the number of rejected responses, meaning that for the same prompt we will have the same chosen response and different rejected responses. ## Reproduce <a target="_blank" href="URL <img src="URL alt="Open In Colab"/> </a> To reproduce the data processing combining both our approach and the suggestions from HuggingFace H4 w.r.t. the formatting and the ones from AllenAI to remove the TruthfulQA contamination, feel free to run the attached Colab Notebook or just view it at 'URL' within this repository. From Argilla we encourage anyone out there to play around, investigate, and experiment with the data, and we firmly believe on open sourcing what we do, as ourselves, as well as the whole community, benefit a lot from open source and we also want to give back. If you find this dataset is useful in your work, please cite the original UltraFeedback dataset: URL Additionally, you may also want to cite our work with Notus 7B, which lead the curation of the UltraFeedback dataset: > Alphabetically ordered by last name due to equal contribution.
[ "# UltraFeedback - Multi-Binarized using the Average of Preference Ratings (Cleaned)\n\nThis dataset represents a new iteration on top of 'argilla/ultrafeedback-binarized-preferences-cleaned',\nand has been created to explore whether DPO fine-tuning with more than one rejection per chosen response helps the model perform better in the \nAlpacaEval, MT-Bench, and LM Eval Harness benchmarks.\n\nRead more about Argilla's approach towards UltraFeedback binarization at 'argilla/ultrafeedback-binarized-preferences/URL',\nand about the parent approach of this one at 'argilla/ultrafeedback-binarized-preferences-cleaned/URL',", "## Differences with 'argilla/ultrafeedback-binarized-preferences'\n\nThanks to the recent issue identified by AllenAI related to the TruthfulQA contamination within the\noriginal UltraFeedback dataset due to some prompts being reused from the TruthfulQA dataset (used for benchmarking\nin the Open LLM Leaderboard from HuggingFace H4), we also decided\nto follow AllenAI's advice and remove those from the UltraFeedback dataset that we binarized using a completely different approach, which\nimplied using the average of the preference ratings rather than the critique overall score, as\n'HuggingFaceH4/ultrafeedback_binarized' did.\n\nBesides that, we also saw that not only the rows with the 'source=truthful_qa' were contamined (for obvious reasons), but also some\ncoming from ShareGPT, so we also removed those doing a left join with both subsets from the 'truthful_qa' dataset.\n\nFinally, we also modified the formatting to be aligned with both 'HuggingFaceH4/ultrafeedback_binarized',\nand 'allenai/ultrafeedback_binarized_cleaned' in order to ease\nthe integration within the 'huggingface/alignment-handbook' so that the formatting is standardized.", "## Differences with 'argilla/ultrafeedback-binarized-preferences-cleaned'\n\nWe kept the same pre-processing steps for cleaning the 'openbmb/UltraFeedback' as well as the same preference\nrating calculation towards deciding whether to select a response as chosen or rejected, the difference is that this one is using a multi-binarization approach, where each chosen response has its own row with one different row\ndepending on the number of rejected responses, meaning that for the same prompt we will have the same chosen response and different rejected responses.", "## Reproduce\n\n<a target=\"_blank\" href=\"URL\n <img src=\"URL alt=\"Open In Colab\"/>\n</a>\n\nTo reproduce the data processing combining both our approach and the suggestions from HuggingFace H4 w.r.t. the formatting and the ones from AllenAI to\nremove the TruthfulQA contamination, feel free to run the attached Colab Notebook or just view it at 'URL' within this repository.\n\nFrom Argilla we encourage anyone out there to play around, investigate, and experiment with the data, and we firmly believe on open sourcing what we do, as\nourselves, as well as the whole community, benefit a lot from open source and we also want to give back.\n\nIf you find this dataset is useful in your work, please cite the original UltraFeedback dataset: URL\n\nAdditionally, you may also want to cite our work with Notus 7B, which lead the curation of the UltraFeedback dataset:\n\n\n\n> Alphabetically ordered by last name due to equal contribution." ]
[ "TAGS\n#task_categories-text-generation #size_categories-100K<n<1M #language-English #license-mit #dpo #preference #ultrafeedback #region-us \n", "# UltraFeedback - Multi-Binarized using the Average of Preference Ratings (Cleaned)\n\nThis dataset represents a new iteration on top of 'argilla/ultrafeedback-binarized-preferences-cleaned',\nand has been created to explore whether DPO fine-tuning with more than one rejection per chosen response helps the model perform better in the \nAlpacaEval, MT-Bench, and LM Eval Harness benchmarks.\n\nRead more about Argilla's approach towards UltraFeedback binarization at 'argilla/ultrafeedback-binarized-preferences/URL',\nand about the parent approach of this one at 'argilla/ultrafeedback-binarized-preferences-cleaned/URL',", "## Differences with 'argilla/ultrafeedback-binarized-preferences'\n\nThanks to the recent issue identified by AllenAI related to the TruthfulQA contamination within the\noriginal UltraFeedback dataset due to some prompts being reused from the TruthfulQA dataset (used for benchmarking\nin the Open LLM Leaderboard from HuggingFace H4), we also decided\nto follow AllenAI's advice and remove those from the UltraFeedback dataset that we binarized using a completely different approach, which\nimplied using the average of the preference ratings rather than the critique overall score, as\n'HuggingFaceH4/ultrafeedback_binarized' did.\n\nBesides that, we also saw that not only the rows with the 'source=truthful_qa' were contamined (for obvious reasons), but also some\ncoming from ShareGPT, so we also removed those doing a left join with both subsets from the 'truthful_qa' dataset.\n\nFinally, we also modified the formatting to be aligned with both 'HuggingFaceH4/ultrafeedback_binarized',\nand 'allenai/ultrafeedback_binarized_cleaned' in order to ease\nthe integration within the 'huggingface/alignment-handbook' so that the formatting is standardized.", "## Differences with 'argilla/ultrafeedback-binarized-preferences-cleaned'\n\nWe kept the same pre-processing steps for cleaning the 'openbmb/UltraFeedback' as well as the same preference\nrating calculation towards deciding whether to select a response as chosen or rejected, the difference is that this one is using a multi-binarization approach, where each chosen response has its own row with one different row\ndepending on the number of rejected responses, meaning that for the same prompt we will have the same chosen response and different rejected responses.", "## Reproduce\n\n<a target=\"_blank\" href=\"URL\n <img src=\"URL alt=\"Open In Colab\"/>\n</a>\n\nTo reproduce the data processing combining both our approach and the suggestions from HuggingFace H4 w.r.t. the formatting and the ones from AllenAI to\nremove the TruthfulQA contamination, feel free to run the attached Colab Notebook or just view it at 'URL' within this repository.\n\nFrom Argilla we encourage anyone out there to play around, investigate, and experiment with the data, and we firmly believe on open sourcing what we do, as\nourselves, as well as the whole community, benefit a lot from open source and we also want to give back.\n\nIf you find this dataset is useful in your work, please cite the original UltraFeedback dataset: URL\n\nAdditionally, you may also want to cite our work with Notus 7B, which lead the curation of the UltraFeedback dataset:\n\n\n\n> Alphabetically ordered by last name due to equal contribution." ]
[ 49, 181, 300, 139, 233 ]
[ "passage: TAGS\n#task_categories-text-generation #size_categories-100K<n<1M #language-English #license-mit #dpo #preference #ultrafeedback #region-us \n# UltraFeedback - Multi-Binarized using the Average of Preference Ratings (Cleaned)\n\nThis dataset represents a new iteration on top of 'argilla/ultrafeedback-binarized-preferences-cleaned',\nand has been created to explore whether DPO fine-tuning with more than one rejection per chosen response helps the model perform better in the \nAlpacaEval, MT-Bench, and LM Eval Harness benchmarks.\n\nRead more about Argilla's approach towards UltraFeedback binarization at 'argilla/ultrafeedback-binarized-preferences/URL',\nand about the parent approach of this one at 'argilla/ultrafeedback-binarized-preferences-cleaned/URL'," ]
db3f2582c3d4219059f00b650f68968486034aee
# Dataset Card for "librispeech_asr-audiodec_44k" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
anthony-wss/librispeech_asr-audiodec_44k
[ "region:us" ]
2023-12-11T14:11:39+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train.clean.360", "path": "data/train.clean.360-*"}, {"split": "train.other.500", "path": "data/train.other.500-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "unit", "sequence": {"sequence": "int64"}}], "splits": [{"name": "train.clean.360", "num_bytes": 10788010668, "num_examples": 104014}, {"name": "train.other.500", "num_bytes": 14756337873, "num_examples": 148688}], "download_size": 3925792960, "dataset_size": 25544348541}}
2023-12-13T04:31:36+00:00
[]
[]
TAGS #region-us
# Dataset Card for "librispeech_asr-audiodec_44k" More Information needed
[ "# Dataset Card for \"librispeech_asr-audiodec_44k\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"librispeech_asr-audiodec_44k\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"librispeech_asr-audiodec_44k\"\n\nMore Information needed" ]
aebfdb54dcbdb99dc0ef6b2e85f7d2237957acde
# Can It Edit? Evaluating the Ability of Large Language Models to Follow Code Editing Instructions CanItEdit is a benchmark for evaluating LLMs on instructional code editing, the task of updating a program given a natural language instruction. The benchmark contains 54 hand-crafted Python programs with before and after code blocks, two types of natural language instructions (descriptive and lazy), and a hidden test suite. The dataset’s dual natural language instructions test model efficiency in two scenarios: 1) Descriptive: Detailed instructions replicate situations where users provide specific specifications or another model outlines a plan, similar to Reflexion prompting, 2) Lazy: Informal instructions resemble typical user queries for LLMs in code generation. For more information and results see [our paper](https://federico.codes/assets/papers/canitedit.pdf). ## How To Evaluate All the code for evaluating the benchmark can be found in our [GitHub repository](https://github.com/nuprl/CanItEdit).
nuprl/CanItEdit
[ "task_categories:text2text-generation", "annotations_creators:expert-generated", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:n<1K", "source_datasets:original", "language:en", "license:mit", "code-generation", "region:us" ]
2023-12-11T14:13:35+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["expert-generated"], "language": ["en"], "license": ["mit"], "multilinguality": ["monolingual"], "size_categories": ["n<1K"], "source_datasets": ["original"], "task_categories": ["text2text-generation"], "task_ids": [], "paperswithcode_id": "canitedit", "pretty_name": "CanItEdit", "tags": ["code-generation"], "dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "name", "dtype": "string"}, {"name": "full_name", "dtype": "string"}, {"name": "before", "dtype": "string"}, {"name": "after", "dtype": "string"}, {"name": "tests", "dtype": "string"}, {"name": "instruction_descriptive", "dtype": "string"}, {"name": "instruction_humane", "dtype": "string"}, {"name": "taxonomy", "struct": [{"name": "change_kind", "dtype": "string"}, {"name": "libraries", "sequence": "string"}, {"name": "topic", "dtype": "string"}]}], "splits": [{"name": "test", "num_bytes": 301982, "num_examples": 54}], "download_size": 136181, "dataset_size": 301982}, "configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}]}
2023-12-14T20:57:48+00:00
[]
[ "en" ]
TAGS #task_categories-text2text-generation #annotations_creators-expert-generated #language_creators-expert-generated #multilinguality-monolingual #size_categories-n<1K #source_datasets-original #language-English #license-mit #code-generation #region-us
# Can It Edit? Evaluating the Ability of Large Language Models to Follow Code Editing Instructions CanItEdit is a benchmark for evaluating LLMs on instructional code editing, the task of updating a program given a natural language instruction. The benchmark contains 54 hand-crafted Python programs with before and after code blocks, two types of natural language instructions (descriptive and lazy), and a hidden test suite. The dataset’s dual natural language instructions test model efficiency in two scenarios: 1) Descriptive: Detailed instructions replicate situations where users provide specific specifications or another model outlines a plan, similar to Reflexion prompting, 2) Lazy: Informal instructions resemble typical user queries for LLMs in code generation. For more information and results see our paper. ## How To Evaluate All the code for evaluating the benchmark can be found in our GitHub repository.
[ "# Can It Edit? Evaluating the Ability of Large Language Models to Follow Code Editing Instructions\nCanItEdit is a benchmark for evaluating LLMs on instructional code editing, the task of updating a program given a natural language instruction. The benchmark contains 54 hand-crafted Python programs with before and after code blocks, two types of natural language instructions (descriptive and lazy), and a hidden test suite.\n\nThe dataset’s dual natural language instructions test model efficiency in two scenarios: \n1) Descriptive: Detailed instructions replicate situations where users provide specific specifications or\nanother model outlines a plan, similar to Reflexion prompting,\n2) Lazy: Informal instructions resemble typical user queries\nfor LLMs in code generation.\n\nFor more information and results see our paper.", "## How To Evaluate\nAll the code for evaluating the benchmark can be found in our GitHub repository." ]
[ "TAGS\n#task_categories-text2text-generation #annotations_creators-expert-generated #language_creators-expert-generated #multilinguality-monolingual #size_categories-n<1K #source_datasets-original #language-English #license-mit #code-generation #region-us \n", "# Can It Edit? Evaluating the Ability of Large Language Models to Follow Code Editing Instructions\nCanItEdit is a benchmark for evaluating LLMs on instructional code editing, the task of updating a program given a natural language instruction. The benchmark contains 54 hand-crafted Python programs with before and after code blocks, two types of natural language instructions (descriptive and lazy), and a hidden test suite.\n\nThe dataset’s dual natural language instructions test model efficiency in two scenarios: \n1) Descriptive: Detailed instructions replicate situations where users provide specific specifications or\nanother model outlines a plan, similar to Reflexion prompting,\n2) Lazy: Informal instructions resemble typical user queries\nfor LLMs in code generation.\n\nFor more information and results see our paper.", "## How To Evaluate\nAll the code for evaluating the benchmark can be found in our GitHub repository." ]
[ 83, 175, 25 ]
[ "passage: TAGS\n#task_categories-text2text-generation #annotations_creators-expert-generated #language_creators-expert-generated #multilinguality-monolingual #size_categories-n<1K #source_datasets-original #language-English #license-mit #code-generation #region-us \n# Can It Edit? Evaluating the Ability of Large Language Models to Follow Code Editing Instructions\nCanItEdit is a benchmark for evaluating LLMs on instructional code editing, the task of updating a program given a natural language instruction. The benchmark contains 54 hand-crafted Python programs with before and after code blocks, two types of natural language instructions (descriptive and lazy), and a hidden test suite.\n\nThe dataset’s dual natural language instructions test model efficiency in two scenarios: \n1) Descriptive: Detailed instructions replicate situations where users provide specific specifications or\nanother model outlines a plan, similar to Reflexion prompting,\n2) Lazy: Informal instructions resemble typical user queries\nfor LLMs in code generation.\n\nFor more information and results see our paper.## How To Evaluate\nAll the code for evaluating the benchmark can be found in our GitHub repository." ]
18c319deef96d0b0af2702a8f99c38b1ea37a41c
# Dataset Card for "Alis" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ArasAyen/Alis
[ "region:us" ]
2023-12-11T14:14:53+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 974600521.0, "num_examples": 659}], "download_size": 974328268, "dataset_size": 974600521.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-12-11T14:15:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Alis" More Information needed
[ "# Dataset Card for \"Alis\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Alis\"\n\nMore Information needed" ]
[ 6, 12 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Alis\"\n\nMore Information needed" ]
eb407d5702e518776bd3cad5959c610b139e16dc
### dataset source excellent sharing: https://github.com/cbaziotis/datastories-semeval2017-task4/tree/master/dataset/Subtask_A/4A-English task government: https://alt.qcri.org/semeval2017/task4/index.php?id=data-and-tools
Siki-77/twitter2017
[ "license:apache-2.0", "region:us" ]
2023-12-11T14:18:59+00:00
{"license": "apache-2.0"}
2023-12-17T09:03:49+00:00
[]
[]
TAGS #license-apache-2.0 #region-us
### dataset source excellent sharing: URL task government: URL
[ "### dataset source\nexcellent sharing:\nURL\n\ntask government:\nURL" ]
[ "TAGS\n#license-apache-2.0 #region-us \n", "### dataset source\nexcellent sharing:\nURL\n\ntask government:\nURL" ]
[ 14, 13 ]
[ "passage: TAGS\n#license-apache-2.0 #region-us \n### dataset source\nexcellent sharing:\nURL\n\ntask government:\nURL" ]
13ac0a9c60d115274592c54f5c69fdadcd782e23
WARNING: EXTREMELY WORK IN PROGRESS. NOT YET USEABLE; HAVENT REMOVED RLHF INSTANCES YET.
unaidedelf87777/slimorca-sem_deduped
[ "region:us" ]
2023-12-11T14:27:52+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "system_message", "dtype": "string"}, {"name": "instruction", "dtype": "string"}, {"name": "completion", "dtype": "string"}, {"name": "meta", "struct": [{"name": "topic_depth_1", "dtype": "string"}, {"name": "topic_depth_2", "dtype": "string"}, {"name": "topic_depth_3", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 834398137, "num_examples": 477358}], "download_size": 423106996, "dataset_size": 834398137}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-12-12T17:46:46+00:00
[]
[]
TAGS #region-us
WARNING: EXTREMELY WORK IN PROGRESS. NOT YET USEABLE; HAVENT REMOVED RLHF INSTANCES YET.
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
4696f11b9c3746aa5877d8e114eaf449f78b131d
# Dataset Card for openslr-slr69-ca-denoised This is a post-processed version of the Catalan subset belonging to the [Open Speech and Language Resources (OpenSLR)](https://www.openslr.org/index.html) speech dataset. Specifically the subset [OpenSLR-69](https://www.openslr.org/69). The original HF🤗 SLR-69 dataset is located [here](https://huggingface.co/datasets/openslr/viewer/SLR69). Same license is maintained: [Attribution-ShareAlike 4.0 International](https://creativecommons.org/licenses/by/4.0/). ## Dataset Details ### Dataset Description We processed the data of the Catalan OpenSLR with the following recipe: - **Trimming:** Long silences from the start and the end of clips have been removed. - [py-webrtcvad](https://pypi.org/project/webrtcvad/) -> Python interface to the Voice Activity Detector (VAD) developed by Google for the WebRTC. - **Resampling:** From 48000 Hz to 22050 Hz, which is the most common sampling rate for training TTS models - Resampler from [CoquiTTS](https://github.com/coqui-ai/TTS/tree/dev) framework - **Denoising:** Although base quality of the audios is high, we could remove some background noise and small artifcats thanks to the CleanUNet denoiser developed by NVIDIA. - [CleanUNet](https://github.com/NVIDIA/CleanUNet) - [arXiv](https://arxiv.org/abs/2202.07790) We kept the same number of wave files, also the original anonymized file names and transcriptions. ## Uses The purpose of this dataset is mainly for training text-to-speech and automatic speech recognition models in Catalan. ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> The dataset consists of a single split, providing audios and transcriptions: ``` DatasetDict({ train: Dataset({ features: ['audio', 'transcription'], num_rows: 4240 }) }) ``` Each data point is structured as: ``` >> data['train'][0]['audio'] {'path': 'caf_09901_01619988267.wav', 'array': array([-3.05175781e-05, -3.05175781e-05, -3.05175781e-05, ..., -6.10351562e-05, -6.10351562e-05, -6.10351562e-05]) 'sampling_rate': 22050} >> data['train'][0]['transcription'] "L'òpera de Sydney es troba a l'entrada de la badia" ``` ### Dataset Splits - ```audio (dict)```: A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate. Note that when accessing the audio column: ```dataset[0]["audio"]``` the audio file is automatically decoded and resampled to ```dataset.features["audio"].sampling_rate```. Decoding and resampling of a large number of audio files might take a significant amount of time. Thus, it is important to first query the sample index before the "audio" column, i.e. ```dataset[0]["audio"]``` should always be preferred over ```dataset["audio"][0]```. * ```path (str)```: The path to the audio file. * ```array (array)```: Decoded audio array. * ```sampling_rate (int)```: Audio sampling rate. - ```transcription (str)```: The sentence the user was prompted to speak. ## Dataset Creation ### Source Data *SLR69: Crowdsourced high-quality Catalan multi-speaker speech data set* This data set contains transcribed high-quality audio of Catalan sentences recorded by volunteers. The recordings were prepared with the help of Direcció General de Política Lingüística del Departament de Cultura, Generalitat de Catalunya. The data set consists of wave files, and a TSV file (line_index.tsv). The file line_index.tsv contains an anonymized FileID and the transcription of audio in the file. The data set has been manually quality checked, but there might still be errors. Please report any issues in the following issue tracker on GitHub. https://github.com/googlei18n/language-resources/issues The original dataset is distributed under Creative Commons Attribution-ShareAlike 4.0 International Public License. See [LICENSE](https://www.openslr.org/resources/69/LICENSE) file and [https://github.com/google/language-resources#license](https://github.com/google/language-resources#license) for license information. #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> This is a post-processed version of the Catalan [OpenSLR-69](https://www.openslr.org/69) dataset. For more inormation about the original data collection and processing refer to [this paper](https://aclanthology.org/2020.sltu-1.3/). #### Who are the source data producers? Copyright 2018, 2019 Google, Inc. Copyright 2023 Language Technologies Unit (LangTech) at Barcelona Supercomputing Center ### Annotations <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> (N/A) #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> The dataset consists of people who have donated their voice online. You agree to not attempt to determine the identity of speakers in this dataset. ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation The original paper where authors detail how OpenSLR-69 was generated: ``` @inproceedings{kjartansson-etal-2020-open, title = {{Open-Source High Quality Speech Datasets for Basque, Catalan and Galician}}, author = {Kjartansson, Oddur and Gutkin, Alexander and Butryna, Alena and Demirsahin, Isin and Rivera, Clara}, booktitle = {Proceedings of the 1st Joint Workshop on Spoken Language Technologies for Under-resourced languages (SLTU) and Collaboration and Computing for Under-Resourced Languages (CCURL)}, year = {2020}, pages = {21--27}, month = may, address = {Marseille, France}, publisher = {European Language Resources association (ELRA)}, url = {https://www.aclweb.org/anthology/2020.sltu-1.3}, ISBN = {979-10-95546-35-1}, } ``` **APA:** ## Funding This work has been promoted and financed by the Generalitat de Catalunya through the [Aina project] (https://projecteaina.cat/). ## Dataset Card Contact [email protected]
projecte-aina/openslr-slr69-ca-trimmed-denoised
[ "task_categories:text-to-speech", "annotations_creators:no-annotation", "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:openslr", "language:ca", "license:cc-by-sa-4.0", "arxiv:2202.07790", "doi:10.57967/hf/1493", "region:us" ]
2023-12-11T14:43:28+00:00
{"annotations_creators": ["no-annotation"], "language_creators": ["crowdsourced"], "language": ["ca"], "license": ["cc-by-sa-4.0"], "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": "openslr", "task_categories": ["text-to-speech"], "task_ids": [], "pretty_name": "openslr-slr69-ca-trimmed-denoised", "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "transcription", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 811311975.4, "num_examples": 4240}], "download_size": 721217811, "dataset_size": 811311975.4}}
2024-01-17T17:00:46+00:00
[ "2202.07790" ]
[ "ca" ]
TAGS #task_categories-text-to-speech #annotations_creators-no-annotation #language_creators-crowdsourced #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-openslr #language-Catalan #license-cc-by-sa-4.0 #arxiv-2202.07790 #doi-10.57967/hf/1493 #region-us
# Dataset Card for openslr-slr69-ca-denoised This is a post-processed version of the Catalan subset belonging to the Open Speech and Language Resources (OpenSLR) speech dataset. Specifically the subset OpenSLR-69. The original HF SLR-69 dataset is located here. Same license is maintained: Attribution-ShareAlike 4.0 International. ## Dataset Details ### Dataset Description We processed the data of the Catalan OpenSLR with the following recipe: - Trimming: Long silences from the start and the end of clips have been removed. - py-webrtcvad -> Python interface to the Voice Activity Detector (VAD) developed by Google for the WebRTC. - Resampling: From 48000 Hz to 22050 Hz, which is the most common sampling rate for training TTS models - Resampler from CoquiTTS framework - Denoising: Although base quality of the audios is high, we could remove some background noise and small artifcats thanks to the CleanUNet denoiser developed by NVIDIA. - CleanUNet - arXiv We kept the same number of wave files, also the original anonymized file names and transcriptions. ## Uses The purpose of this dataset is mainly for training text-to-speech and automatic speech recognition models in Catalan. ## Dataset Structure The dataset consists of a single split, providing audios and transcriptions: Each data point is structured as: ### Dataset Splits - : A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate. Note that when accessing the audio column: the audio file is automatically decoded and resampled to . Decoding and resampling of a large number of audio files might take a significant amount of time. Thus, it is important to first query the sample index before the "audio" column, i.e. should always be preferred over . * : The path to the audio file. * : Decoded audio array. * : Audio sampling rate. - : The sentence the user was prompted to speak. ## Dataset Creation ### Source Data *SLR69: Crowdsourced high-quality Catalan multi-speaker speech data set* This data set contains transcribed high-quality audio of Catalan sentences recorded by volunteers. The recordings were prepared with the help of Direcció General de Política Lingüística del Departament de Cultura, Generalitat de Catalunya. The data set consists of wave files, and a TSV file (line_index.tsv). The file line_index.tsv contains an anonymized FileID and the transcription of audio in the file. The data set has been manually quality checked, but there might still be errors. Please report any issues in the following issue tracker on GitHub. URL The original dataset is distributed under Creative Commons Attribution-ShareAlike 4.0 International Public License. See LICENSE file and URL for license information. #### Data Collection and Processing This is a post-processed version of the Catalan OpenSLR-69 dataset. For more inormation about the original data collection and processing refer to this paper. #### Who are the source data producers? Copyright 2018, 2019 Google, Inc. Copyright 2023 Language Technologies Unit (LangTech) at Barcelona Supercomputing Center ### Annotations (N/A) #### Personal and Sensitive Information The dataset consists of people who have donated their voice online. You agree to not attempt to determine the identity of speakers in this dataset. ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. The original paper where authors detail how OpenSLR-69 was generated: APA: ## Funding This work has been promoted and financed by the Generalitat de Catalunya through the [Aina project] (URL ## Dataset Card Contact langtech@URL
[ "# Dataset Card for openslr-slr69-ca-denoised\n\nThis is a post-processed version of the Catalan subset belonging to the Open Speech and Language Resources (OpenSLR) speech dataset. \nSpecifically the subset OpenSLR-69. \n\nThe original HF SLR-69 dataset is located here.\n\nSame license is maintained: Attribution-ShareAlike 4.0 International.", "## Dataset Details", "### Dataset Description\n\nWe processed the data of the Catalan OpenSLR with the following recipe:\n\n- Trimming: Long silences from the start and the end of clips have been removed.\n - py-webrtcvad -> Python interface to the Voice Activity Detector (VAD) developed by Google for the WebRTC.\n- Resampling: From 48000 Hz to 22050 Hz, which is the most common sampling rate for training TTS models\n - Resampler from CoquiTTS framework\n- Denoising: Although base quality of the audios is high, we could remove some background noise and small artifcats thanks to the CleanUNet denoiser developed by NVIDIA.\n - CleanUNet - arXiv\n\nWe kept the same number of wave files, also the original anonymized file names and transcriptions.", "## Uses\n\nThe purpose of this dataset is mainly for training text-to-speech and automatic speech recognition models in Catalan.", "## Dataset Structure\n\n\n\nThe dataset consists of a single split, providing audios and transcriptions:\n\nEach data point is structured as:", "### Dataset Splits\n\n- : A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate. Note that when accessing the audio column: the audio file is automatically decoded and resampled to . Decoding and resampling of a large number of audio files might take a significant amount of time. Thus, it is important to first query the sample index before the \"audio\" column, i.e. should always be preferred over .\n \n * : The path to the audio file.\n * : Decoded audio array.\n * : Audio sampling rate.\n\n\n- : The sentence the user was prompted to speak.", "## Dataset Creation", "### Source Data\n\n*SLR69: Crowdsourced high-quality Catalan multi-speaker speech data set*\n\nThis data set contains transcribed high-quality audio of Catalan sentences recorded by volunteers. The recordings\nwere prepared with the help of Direcció General de Política Lingüística del Departament de Cultura, Generalitat de\nCatalunya. The data set consists of wave files, and a TSV file (line_index.tsv). The file line_index.tsv contains \nan anonymized FileID and the transcription of audio in the file.\n\nThe data set has been manually quality checked, but there might still be errors.\n\nPlease report any issues in the following issue tracker on GitHub. URL\n\nThe original dataset is distributed under Creative Commons Attribution-ShareAlike 4.0 International Public License.\nSee LICENSE file and \nURL for license information.", "#### Data Collection and Processing\n\n\nThis is a post-processed version of the Catalan OpenSLR-69 dataset. \nFor more inormation about the original data collection and processing refer to this paper.", "#### Who are the source data producers?\n\nCopyright 2018, 2019 Google, Inc.\n\nCopyright 2023 Language Technologies Unit (LangTech) at Barcelona Supercomputing Center", "### Annotations\n\n\n(N/A)", "#### Personal and Sensitive Information\n\n\nThe dataset consists of people who have donated their voice online. You agree to not attempt to determine the identity of speakers in this dataset.", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\nThe original paper where authors detail how OpenSLR-69 was generated: \n\n\nAPA:", "## Funding\nThis work has been promoted and financed by the Generalitat de Catalunya through the [Aina project] (URL", "## Dataset Card Contact\n\nlangtech@URL" ]
[ "TAGS\n#task_categories-text-to-speech #annotations_creators-no-annotation #language_creators-crowdsourced #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-openslr #language-Catalan #license-cc-by-sa-4.0 #arxiv-2202.07790 #doi-10.57967/hf/1493 #region-us \n", "# Dataset Card for openslr-slr69-ca-denoised\n\nThis is a post-processed version of the Catalan subset belonging to the Open Speech and Language Resources (OpenSLR) speech dataset. \nSpecifically the subset OpenSLR-69. \n\nThe original HF SLR-69 dataset is located here.\n\nSame license is maintained: Attribution-ShareAlike 4.0 International.", "## Dataset Details", "### Dataset Description\n\nWe processed the data of the Catalan OpenSLR with the following recipe:\n\n- Trimming: Long silences from the start and the end of clips have been removed.\n - py-webrtcvad -> Python interface to the Voice Activity Detector (VAD) developed by Google for the WebRTC.\n- Resampling: From 48000 Hz to 22050 Hz, which is the most common sampling rate for training TTS models\n - Resampler from CoquiTTS framework\n- Denoising: Although base quality of the audios is high, we could remove some background noise and small artifcats thanks to the CleanUNet denoiser developed by NVIDIA.\n - CleanUNet - arXiv\n\nWe kept the same number of wave files, also the original anonymized file names and transcriptions.", "## Uses\n\nThe purpose of this dataset is mainly for training text-to-speech and automatic speech recognition models in Catalan.", "## Dataset Structure\n\n\n\nThe dataset consists of a single split, providing audios and transcriptions:\n\nEach data point is structured as:", "### Dataset Splits\n\n- : A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate. Note that when accessing the audio column: the audio file is automatically decoded and resampled to . Decoding and resampling of a large number of audio files might take a significant amount of time. Thus, it is important to first query the sample index before the \"audio\" column, i.e. should always be preferred over .\n \n * : The path to the audio file.\n * : Decoded audio array.\n * : Audio sampling rate.\n\n\n- : The sentence the user was prompted to speak.", "## Dataset Creation", "### Source Data\n\n*SLR69: Crowdsourced high-quality Catalan multi-speaker speech data set*\n\nThis data set contains transcribed high-quality audio of Catalan sentences recorded by volunteers. The recordings\nwere prepared with the help of Direcció General de Política Lingüística del Departament de Cultura, Generalitat de\nCatalunya. The data set consists of wave files, and a TSV file (line_index.tsv). The file line_index.tsv contains \nan anonymized FileID and the transcription of audio in the file.\n\nThe data set has been manually quality checked, but there might still be errors.\n\nPlease report any issues in the following issue tracker on GitHub. URL\n\nThe original dataset is distributed under Creative Commons Attribution-ShareAlike 4.0 International Public License.\nSee LICENSE file and \nURL for license information.", "#### Data Collection and Processing\n\n\nThis is a post-processed version of the Catalan OpenSLR-69 dataset. \nFor more inormation about the original data collection and processing refer to this paper.", "#### Who are the source data producers?\n\nCopyright 2018, 2019 Google, Inc.\n\nCopyright 2023 Language Technologies Unit (LangTech) at Barcelona Supercomputing Center", "### Annotations\n\n\n(N/A)", "#### Personal and Sensitive Information\n\n\nThe dataset consists of people who have donated their voice online. You agree to not attempt to determine the identity of speakers in this dataset.", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\nThe original paper where authors detail how OpenSLR-69 was generated: \n\n\nAPA:", "## Funding\nThis work has been promoted and financed by the Generalitat de Catalunya through the [Aina project] (URL", "## Dataset Card Contact\n\nlangtech@URL" ]
[ 109, 87, 4, 180, 28, 32, 159, 5, 183, 44, 36, 10, 40, 10, 54, 26, 9 ]
[ "passage: TAGS\n#task_categories-text-to-speech #annotations_creators-no-annotation #language_creators-crowdsourced #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-openslr #language-Catalan #license-cc-by-sa-4.0 #arxiv-2202.07790 #doi-10.57967/hf/1493 #region-us \n# Dataset Card for openslr-slr69-ca-denoised\n\nThis is a post-processed version of the Catalan subset belonging to the Open Speech and Language Resources (OpenSLR) speech dataset. \nSpecifically the subset OpenSLR-69. \n\nThe original HF SLR-69 dataset is located here.\n\nSame license is maintained: Attribution-ShareAlike 4.0 International.## Dataset Details### Dataset Description\n\nWe processed the data of the Catalan OpenSLR with the following recipe:\n\n- Trimming: Long silences from the start and the end of clips have been removed.\n - py-webrtcvad -> Python interface to the Voice Activity Detector (VAD) developed by Google for the WebRTC.\n- Resampling: From 48000 Hz to 22050 Hz, which is the most common sampling rate for training TTS models\n - Resampler from CoquiTTS framework\n- Denoising: Although base quality of the audios is high, we could remove some background noise and small artifcats thanks to the CleanUNet denoiser developed by NVIDIA.\n - CleanUNet - arXiv\n\nWe kept the same number of wave files, also the original anonymized file names and transcriptions.## Uses\n\nThe purpose of this dataset is mainly for training text-to-speech and automatic speech recognition models in Catalan.## Dataset Structure\n\n\n\nThe dataset consists of a single split, providing audios and transcriptions:\n\nEach data point is structured as:" ]
cd58f5fcb9e75bdc3e121726695d76b2c14b36ee
### Description This dataset is derived from the already existing dataset made by AI4Bharat. We have used the [IndicXParaphrase](https://huggingface.co/datasets/ai4bharat/IndicXParaphrase) dataset of AI4Bharat to create this instruction style dataset. We have used the malayalam split of the above mentioned dataset to create this one. This was created as part of [Aya Open Science Initiative](https://sites.google.com/cohere.com/aya-en/home) from Cohere For AI. IndicXParaphrase is multilingual, and n-way parallel dataset for paraphrase detection in 10 Indic languages. The original dataset(IndicXParaphrase) was made available under the cc-0 license. ### Template The following templates(Malayalam) where used for converting the original dataset: ``` #Template 1 prompt: ഇനിപ്പറയുന്ന വാചകം വ്യത്യസ്ത വാക്കുകളിൽ എഴുതുക: "{original_sentence}" completion: {paraphrased_sentence} ``` ``` #Template 2 prompt: ഇനിപ്പറയുന്ന വാചകം മറ്റൊരു രീതിയിൽ എഴുതുക: "{original_sentence}" completion: {paraphrased_sentence} ``` ``` #Template 3 prompt: താഴെപ്പറയുന്ന വാചകം പരാവർത്തനം ചെയ്യുക: "{original_sentence}" completion: {paraphrased_sentence} ```
el2e10/aya-paraphrase-malayalam
[ "task_categories:text-generation", "size_categories:n<1K", "source_datasets:extended|ai4bharat/IndicXParaphrase", "language:ml", "license:cc", "region:us" ]
2023-12-11T14:48:09+00:00
{"language": ["ml"], "license": "cc", "size_categories": ["n<1K"], "source_datasets": ["extended|ai4bharat/IndicXParaphrase"], "task_categories": ["text-generation"], "pretty_name": "Aya Paraphrase Malayalam", "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "template_lang", "dtype": "string"}, {"name": "template_id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 710999, "num_examples": 1001}], "download_size": 255190, "dataset_size": 710999}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2024-01-26T14:14:25+00:00
[]
[ "ml" ]
TAGS #task_categories-text-generation #size_categories-n<1K #source_datasets-extended|ai4bharat/IndicXParaphrase #language-Malayalam #license-cc #region-us
### Description This dataset is derived from the already existing dataset made by AI4Bharat. We have used the IndicXParaphrase dataset of AI4Bharat to create this instruction style dataset. We have used the malayalam split of the above mentioned dataset to create this one. This was created as part of Aya Open Science Initiative from Cohere For AI. IndicXParaphrase is multilingual, and n-way parallel dataset for paraphrase detection in 10 Indic languages. The original dataset(IndicXParaphrase) was made available under the cc-0 license. ### Template The following templates(Malayalam) where used for converting the original dataset:
[ "### Description\n\nThis dataset is derived from the already existing dataset made by AI4Bharat. We have used the IndicXParaphrase dataset of AI4Bharat to create this instruction style dataset. \nWe have used the malayalam split of the above mentioned dataset to create this one. This was created as part of Aya Open Science Initiative from Cohere For AI.\n\nIndicXParaphrase is multilingual, and n-way parallel dataset for paraphrase detection in 10 Indic languages. The original dataset(IndicXParaphrase) was made available under the cc-0 license.", "### Template\n\nThe following templates(Malayalam) where used for converting the original dataset:" ]
[ "TAGS\n#task_categories-text-generation #size_categories-n<1K #source_datasets-extended|ai4bharat/IndicXParaphrase #language-Malayalam #license-cc #region-us \n", "### Description\n\nThis dataset is derived from the already existing dataset made by AI4Bharat. We have used the IndicXParaphrase dataset of AI4Bharat to create this instruction style dataset. \nWe have used the malayalam split of the above mentioned dataset to create this one. This was created as part of Aya Open Science Initiative from Cohere For AI.\n\nIndicXParaphrase is multilingual, and n-way parallel dataset for paraphrase detection in 10 Indic languages. The original dataset(IndicXParaphrase) was made available under the cc-0 license.", "### Template\n\nThe following templates(Malayalam) where used for converting the original dataset:" ]
[ 60, 139, 22 ]
[ "passage: TAGS\n#task_categories-text-generation #size_categories-n<1K #source_datasets-extended|ai4bharat/IndicXParaphrase #language-Malayalam #license-cc #region-us \n### Description\n\nThis dataset is derived from the already existing dataset made by AI4Bharat. We have used the IndicXParaphrase dataset of AI4Bharat to create this instruction style dataset. \nWe have used the malayalam split of the above mentioned dataset to create this one. This was created as part of Aya Open Science Initiative from Cohere For AI.\n\nIndicXParaphrase is multilingual, and n-way parallel dataset for paraphrase detection in 10 Indic languages. The original dataset(IndicXParaphrase) was made available under the cc-0 license.### Template\n\nThe following templates(Malayalam) where used for converting the original dataset:" ]
bb75aa9036dadcfa4051cf6be6e7671fead14599
# Livre des procédures fiscales, non-instruct (11-12-2023) This project focuses on fine-tuning pre-trained language models to create efficient and accurate models for tax practice. Fine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach. Instruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks. Instruction-based fine-tuning significantly enhances the performance of LLMs in the following ways: - Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions. - Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs. - Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more. - Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs. - Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text. ## Dataset generation This JSON file is a list of dictionaries, each dictionary contains the following fields: - `version`: `string`, denoting the version associated with the element. - `instruction`: `string`, presenting the instruction linked to the element. - `input`: `string`, signifying the input details for the element. - `output`: `string`, indicating the output information for the element. - `complexity`: `int`, reflecting the degree of abstraction requested from the LLM (Legal Language Model). A value of 1 represents an instruction grounded in authoritative text, while 2 introduces added complexity or abstraction. - `created_at`: `date`, capturing the date and time of the document's creation. - `updated_at`: `date`, detailing the most recent update's date and time. - `expiration`: `date`, delineating the expiration date of the legal information. - `status`: `string`, specifying the application status of the law. - `coming_into_force`: `date`, signifying the date when the legal information becomes enforceable. - `language`: `string`, describing the language in which the legal information is presented. - `length`: `int`, offering information regarding the length of the legal content. - `source`: `string`, representing the source from which the legal information originated. We used the following list of instructions for generating the dataset: ```python instructions = [ "Compose l'intégralité de l'article sous forme écrite.", "Écris la totalité du contenu de l'article.", "Formule la totalité du texte présent dans l'article.", "Produis l'intégralité de l'article en écriture.", "Développe l'article dans son ensemble par écrit.", "Génère l'ensemble du texte contenu dans l'article.", "Formule le contenu intégral de l'article en entier.", "Rédige la totalité du texte de l'article en entier.", "Compose l'intégralité du contenu textuel de l'article.", "Rédige l'ensemble du texte qui constitue l'article.", "Formule l'article entier dans son contenu écrit.", "Composez l'intégralité de l'article sous forme écrite.", "Écrivez la totalité du contenu de l'article.", "Formulez la totalité du texte présent dans l'article.", "Développez l'article dans son ensemble par écrit.", "Générez l'ensemble du texte contenu dans l'article.", "Formulez le contenu intégral de l'article en entier.", "Rédigez la totalité du texte de l'article en entier.", "Composez l'intégralité du contenu textuel de l'article.", "Écrivez l'article dans son intégralité en termes de texte.", "Rédigez l'ensemble du texte qui constitue l'article.", "Formulez l'article entier dans son contenu écrit.", "Composer l'intégralité de l'article sous forme écrite.", "Écrire la totalité du contenu de l'article.", "Formuler la totalité du texte présent dans l'article.", "Produire l'intégralité de l'article en écriture.", "Développer l'article dans son ensemble par écrit.", "Générer l'ensemble du texte contenu dans l'article.", "Formuler le contenu intégral de l'article en entier.", "Rédiger la totalité du texte de l'article en entier.", "Composer l'intégralité du contenu textuel de l'article.", "Rédiger l'ensemble du texte qui constitue l'article.", "Formuler l'article entier dans son contenu écrit.", "Quelles sont les dispositions de l'article ?", "Quelles dispositions sont incluses dans l'article ?", "Quelles sont les dispositions énoncées dans l'article ?", "Quel est le texte intégral de l'article ?", "Quelle est la lettre de l'article ?" ] ``` ## Citing this project If you use this code in your research, please use the following BibTeX entry. ```BibTeX @misc{louisbrulenaudet2023, author = {Louis Brulé Naudet}, title = {Livre des procédures fiscales, non-instruct (11-12-2023)}, howpublished = {\url{https://huggingface.co/datasets/louisbrulenaudet/lpf}}, year = {2023} } ``` ## Feedback If you have any feedback, please reach out at [[email protected]](mailto:[email protected]).
lemoneresearch/lpf
[ "task_categories:text-generation", "task_categories:table-question-answering", "task_categories:summarization", "task_categories:conversational", "multilinguality:monolingual", "size_categories:n<1K", "source_datasets:original", "language:fr", "license:apache-2.0", "finetuning", "legal", "tax", "llm", "fiscal", "lpf", "Livre des procédures fiscales", "region:us" ]
2023-12-11T14:50:04+00:00
{"language": ["fr"], "license": "apache-2.0", "multilinguality": ["monolingual"], "size_categories": ["n<1K"], "source_datasets": ["original"], "task_categories": ["text-generation", "table-question-answering", "summarization", "conversational"], "pretty_name": "Livre des proc\u00e9dures fiscales (LPF)", "tags": ["finetuning", "legal", "tax", "llm", "fiscal", "lpf", "Livre des proc\u00e9dures fiscales"]}
2023-12-11T14:50:55+00:00
[]
[ "fr" ]
TAGS #task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-n<1K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #tax #llm #fiscal #lpf #Livre des procédures fiscales #region-us
# Livre des procédures fiscales, non-instruct (11-12-2023) This project focuses on fine-tuning pre-trained language models to create efficient and accurate models for tax practice. Fine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach. Instruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks. Instruction-based fine-tuning significantly enhances the performance of LLMs in the following ways: - Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions. - Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs. - Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more. - Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs. - Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text. ## Dataset generation This JSON file is a list of dictionaries, each dictionary contains the following fields: - 'version': 'string', denoting the version associated with the element. - 'instruction': 'string', presenting the instruction linked to the element. - 'input': 'string', signifying the input details for the element. - 'output': 'string', indicating the output information for the element. - 'complexity': 'int', reflecting the degree of abstraction requested from the LLM (Legal Language Model). A value of 1 represents an instruction grounded in authoritative text, while 2 introduces added complexity or abstraction. - 'created_at': 'date', capturing the date and time of the document's creation. - 'updated_at': 'date', detailing the most recent update's date and time. - 'expiration': 'date', delineating the expiration date of the legal information. - 'status': 'string', specifying the application status of the law. - 'coming_into_force': 'date', signifying the date when the legal information becomes enforceable. - 'language': 'string', describing the language in which the legal information is presented. - 'length': 'int', offering information regarding the length of the legal content. - 'source': 'string', representing the source from which the legal information originated. We used the following list of instructions for generating the dataset: ## Citing this project If you use this code in your research, please use the following BibTeX entry. ## Feedback If you have any feedback, please reach out at louisbrulenaudet@URL.
[ "# Livre des procédures fiscales, non-instruct (11-12-2023)\n\nThis project focuses on fine-tuning pre-trained language models to create efficient and accurate models for tax practice. \n\nFine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach.\n\nInstruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks.\n\nInstruction-based fine-tuning significantly enhances the performance of LLMs in the following ways:\n\n- Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions.\n- Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs.\n- Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more.\n- Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs.\n- Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text.", "## Dataset generation\n\nThis JSON file is a list of dictionaries, each dictionary contains the following fields:\n\n- 'version': 'string', denoting the version associated with the element.\n- 'instruction': 'string', presenting the instruction linked to the element.\n- 'input': 'string', signifying the input details for the element.\n- 'output': 'string', indicating the output information for the element.\n- 'complexity': 'int', reflecting the degree of abstraction requested from the LLM (Legal Language Model). A value of 1 represents an instruction grounded in authoritative text, while 2 introduces added complexity or abstraction.\n- 'created_at': 'date', capturing the date and time of the document's creation.\n- 'updated_at': 'date', detailing the most recent update's date and time.\n- 'expiration': 'date', delineating the expiration date of the legal information.\n- 'status': 'string', specifying the application status of the law.\n- 'coming_into_force': 'date', signifying the date when the legal information becomes enforceable.\n- 'language': 'string', describing the language in which the legal information is presented.\n- 'length': 'int', offering information regarding the length of the legal content.\n- 'source': 'string', representing the source from which the legal information originated.\n\nWe used the following list of instructions for generating the dataset:", "## Citing this project\n\nIf you use this code in your research, please use the following BibTeX entry.", "## Feedback\n\nIf you have any feedback, please reach out at louisbrulenaudet@URL." ]
[ "TAGS\n#task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-n<1K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #tax #llm #fiscal #lpf #Livre des procédures fiscales #region-us \n", "# Livre des procédures fiscales, non-instruct (11-12-2023)\n\nThis project focuses on fine-tuning pre-trained language models to create efficient and accurate models for tax practice. \n\nFine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach.\n\nInstruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks.\n\nInstruction-based fine-tuning significantly enhances the performance of LLMs in the following ways:\n\n- Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions.\n- Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs.\n- Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more.\n- Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs.\n- Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text.", "## Dataset generation\n\nThis JSON file is a list of dictionaries, each dictionary contains the following fields:\n\n- 'version': 'string', denoting the version associated with the element.\n- 'instruction': 'string', presenting the instruction linked to the element.\n- 'input': 'string', signifying the input details for the element.\n- 'output': 'string', indicating the output information for the element.\n- 'complexity': 'int', reflecting the degree of abstraction requested from the LLM (Legal Language Model). A value of 1 represents an instruction grounded in authoritative text, while 2 introduces added complexity or abstraction.\n- 'created_at': 'date', capturing the date and time of the document's creation.\n- 'updated_at': 'date', detailing the most recent update's date and time.\n- 'expiration': 'date', delineating the expiration date of the legal information.\n- 'status': 'string', specifying the application status of the law.\n- 'coming_into_force': 'date', signifying the date when the legal information becomes enforceable.\n- 'language': 'string', describing the language in which the legal information is presented.\n- 'length': 'int', offering information regarding the length of the legal content.\n- 'source': 'string', representing the source from which the legal information originated.\n\nWe used the following list of instructions for generating the dataset:", "## Citing this project\n\nIf you use this code in your research, please use the following BibTeX entry.", "## Feedback\n\nIf you have any feedback, please reach out at louisbrulenaudet@URL." ]
[ 116, 504, 358, 24, 21 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-n<1K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #tax #llm #fiscal #lpf #Livre des procédures fiscales #region-us \n" ]
6d91d812bf4eef93eb55687483fdb7bdbc1e4821
# Dataset Card for "EUIPO_QA" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
agil/EUIPO_QA
[ "region:us" ]
2023-12-11T14:51:44+00:00
{"dataset_info": {"features": [{"name": "ID", "dtype": "int64"}, {"name": "question", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "category", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 145159.30633802817, "num_examples": 227}, {"name": "test", "num_bytes": 36449.69366197183, "num_examples": 57}], "download_size": 93579, "dataset_size": 181609.0}}
2024-01-11T21:56:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "EUIPO_QA" More Information needed
[ "# Dataset Card for \"EUIPO_QA\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"EUIPO_QA\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"EUIPO_QA\"\n\nMore Information needed" ]
b6e602ed7e1ce9f89d150301ae8fe68b541ba946
# Code Général des Impôts, non-instruct (11-12-2023) This project focuses on fine-tuning pre-trained language models to create efficient and accurate models for tax practice. Fine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach. Instruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks. Instruction-based fine-tuning significantly enhances the performance of LLMs in the following ways: - Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions. - Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs. - Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more. - Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs. - Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text. ## Dataset generation This JSON file is a list of dictionaries, each dictionary contains the following fields: - `version`: `string`, denoting the version associated with the element. - `instruction`: `string`, presenting the instruction linked to the element. - `input`: `string`, signifying the input details for the element. - `output`: `string`, indicating the output information for the element. - `complexity`: `int`, reflecting the degree of abstraction requested from the LLM (Legal Language Model). A value of 1 represents an instruction grounded in authoritative text, while 2 introduces added complexity or abstraction. - `created_at`: `date`, capturing the date and time of the document's creation. - `updated_at`: `date`, detailing the most recent update's date and time. - `expiration`: `date`, delineating the expiration date of the legal information. - `status`: `string`, specifying the application status of the law. - `coming_into_force`: `date`, signifying the date when the legal information becomes enforceable. - `language`: `string`, describing the language in which the legal information is presented. - `length`: `int`, offering information regarding the length of the legal content. - `source`: `string`, representing the source from which the legal information originated. We used the following list of instructions for generating the dataset: ```python instructions = [ "Compose l'intégralité de l'article sous forme écrite.", "Écris la totalité du contenu de l'article.", "Formule la totalité du texte présent dans l'article.", "Produis l'intégralité de l'article en écriture.", "Développe l'article dans son ensemble par écrit.", "Génère l'ensemble du texte contenu dans l'article.", "Formule le contenu intégral de l'article en entier.", "Rédige la totalité du texte de l'article en entier.", "Compose l'intégralité du contenu textuel de l'article.", "Rédige l'ensemble du texte qui constitue l'article.", "Formule l'article entier dans son contenu écrit.", "Composez l'intégralité de l'article sous forme écrite.", "Écrivez la totalité du contenu de l'article.", "Formulez la totalité du texte présent dans l'article.", "Développez l'article dans son ensemble par écrit.", "Générez l'ensemble du texte contenu dans l'article.", "Formulez le contenu intégral de l'article en entier.", "Rédigez la totalité du texte de l'article en entier.", "Composez l'intégralité du contenu textuel de l'article.", "Écrivez l'article dans son intégralité en termes de texte.", "Rédigez l'ensemble du texte qui constitue l'article.", "Formulez l'article entier dans son contenu écrit.", "Composer l'intégralité de l'article sous forme écrite.", "Écrire la totalité du contenu de l'article.", "Formuler la totalité du texte présent dans l'article.", "Produire l'intégralité de l'article en écriture.", "Développer l'article dans son ensemble par écrit.", "Générer l'ensemble du texte contenu dans l'article.", "Formuler le contenu intégral de l'article en entier.", "Rédiger la totalité du texte de l'article en entier.", "Composer l'intégralité du contenu textuel de l'article.", "Rédiger l'ensemble du texte qui constitue l'article.", "Formuler l'article entier dans son contenu écrit.", "Quelles sont les dispositions de l'article ?", "Quelles dispositions sont incluses dans l'article ?", "Quelles sont les dispositions énoncées dans l'article ?", "Quel est le texte intégral de l'article ?", "Quelle est la lettre de l'article ?" ] ``` ## Citing this project If you use this code in your research, please use the following BibTeX entry. ```BibTeX @misc{louisbrulenaudet2023, author = {Louis Brulé Naudet}, title = {Code Général des Impôts, non-instruct (11-12-2023)}, howpublished = {\url{https://huggingface.co/datasets/louisbrulenaudet/cgi}}, year = {2023} } ``` ## Feedback If you have any feedback, please reach out at [[email protected]](mailto:[email protected]).
lemoneresearch/cgi
[ "task_categories:text-generation", "task_categories:table-question-answering", "task_categories:summarization", "task_categories:conversational", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:fr", "license:apache-2.0", "finetuning", "legal", "tax", "llm", "fiscal", "cgi", "Code Général des Impôts", "region:us" ]
2023-12-11T14:55:01+00:00
{"language": ["fr"], "license": "apache-2.0", "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": ["original"], "task_categories": ["text-generation", "table-question-answering", "summarization", "conversational"], "pretty_name": "Code G\u00e9n\u00e9ral des Imp\u00f4ts (CGI)", "tags": ["finetuning", "legal", "tax", "llm", "fiscal", "cgi", "Code G\u00e9n\u00e9ral des Imp\u00f4ts"]}
2023-12-11T14:55:57+00:00
[]
[ "fr" ]
TAGS #task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #tax #llm #fiscal #cgi #Code Général des Impôts #region-us
# Code Général des Impôts, non-instruct (11-12-2023) This project focuses on fine-tuning pre-trained language models to create efficient and accurate models for tax practice. Fine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach. Instruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks. Instruction-based fine-tuning significantly enhances the performance of LLMs in the following ways: - Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions. - Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs. - Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more. - Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs. - Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text. ## Dataset generation This JSON file is a list of dictionaries, each dictionary contains the following fields: - 'version': 'string', denoting the version associated with the element. - 'instruction': 'string', presenting the instruction linked to the element. - 'input': 'string', signifying the input details for the element. - 'output': 'string', indicating the output information for the element. - 'complexity': 'int', reflecting the degree of abstraction requested from the LLM (Legal Language Model). A value of 1 represents an instruction grounded in authoritative text, while 2 introduces added complexity or abstraction. - 'created_at': 'date', capturing the date and time of the document's creation. - 'updated_at': 'date', detailing the most recent update's date and time. - 'expiration': 'date', delineating the expiration date of the legal information. - 'status': 'string', specifying the application status of the law. - 'coming_into_force': 'date', signifying the date when the legal information becomes enforceable. - 'language': 'string', describing the language in which the legal information is presented. - 'length': 'int', offering information regarding the length of the legal content. - 'source': 'string', representing the source from which the legal information originated. We used the following list of instructions for generating the dataset: ## Citing this project If you use this code in your research, please use the following BibTeX entry. ## Feedback If you have any feedback, please reach out at louisbrulenaudet@URL.
[ "# Code Général des Impôts, non-instruct (11-12-2023)\n\nThis project focuses on fine-tuning pre-trained language models to create efficient and accurate models for tax practice. \n\nFine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach.\n\nInstruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks.\n\nInstruction-based fine-tuning significantly enhances the performance of LLMs in the following ways:\n\n- Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions.\n- Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs.\n- Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more.\n- Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs.\n- Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text.", "## Dataset generation\n\nThis JSON file is a list of dictionaries, each dictionary contains the following fields:\n\n- 'version': 'string', denoting the version associated with the element.\n- 'instruction': 'string', presenting the instruction linked to the element.\n- 'input': 'string', signifying the input details for the element.\n- 'output': 'string', indicating the output information for the element.\n- 'complexity': 'int', reflecting the degree of abstraction requested from the LLM (Legal Language Model). A value of 1 represents an instruction grounded in authoritative text, while 2 introduces added complexity or abstraction.\n- 'created_at': 'date', capturing the date and time of the document's creation.\n- 'updated_at': 'date', detailing the most recent update's date and time.\n- 'expiration': 'date', delineating the expiration date of the legal information.\n- 'status': 'string', specifying the application status of the law.\n- 'coming_into_force': 'date', signifying the date when the legal information becomes enforceable.\n- 'language': 'string', describing the language in which the legal information is presented.\n- 'length': 'int', offering information regarding the length of the legal content.\n- 'source': 'string', representing the source from which the legal information originated.\n\nWe used the following list of instructions for generating the dataset:", "## Citing this project\n\nIf you use this code in your research, please use the following BibTeX entry.", "## Feedback\n\nIf you have any feedback, please reach out at louisbrulenaudet@URL." ]
[ "TAGS\n#task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #tax #llm #fiscal #cgi #Code Général des Impôts #region-us \n", "# Code Général des Impôts, non-instruct (11-12-2023)\n\nThis project focuses on fine-tuning pre-trained language models to create efficient and accurate models for tax practice. \n\nFine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach.\n\nInstruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks.\n\nInstruction-based fine-tuning significantly enhances the performance of LLMs in the following ways:\n\n- Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions.\n- Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs.\n- Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more.\n- Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs.\n- Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text.", "## Dataset generation\n\nThis JSON file is a list of dictionaries, each dictionary contains the following fields:\n\n- 'version': 'string', denoting the version associated with the element.\n- 'instruction': 'string', presenting the instruction linked to the element.\n- 'input': 'string', signifying the input details for the element.\n- 'output': 'string', indicating the output information for the element.\n- 'complexity': 'int', reflecting the degree of abstraction requested from the LLM (Legal Language Model). A value of 1 represents an instruction grounded in authoritative text, while 2 introduces added complexity or abstraction.\n- 'created_at': 'date', capturing the date and time of the document's creation.\n- 'updated_at': 'date', detailing the most recent update's date and time.\n- 'expiration': 'date', delineating the expiration date of the legal information.\n- 'status': 'string', specifying the application status of the law.\n- 'coming_into_force': 'date', signifying the date when the legal information becomes enforceable.\n- 'language': 'string', describing the language in which the legal information is presented.\n- 'length': 'int', offering information regarding the length of the legal content.\n- 'source': 'string', representing the source from which the legal information originated.\n\nWe used the following list of instructions for generating the dataset:", "## Citing this project\n\nIf you use this code in your research, please use the following BibTeX entry.", "## Feedback\n\nIf you have any feedback, please reach out at louisbrulenaudet@URL." ]
[ 117, 504, 358, 24, 21 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #tax #llm #fiscal #cgi #Code Général des Impôts #region-us \n" ]
85fefb51eb3df1a8e8ad2c5b6d85008b09deba47
# QUORA_ONE_MANY_QA This dataset is derived from **quora.com** questioning data. It is a question with multiple answers. # STATISTICS - 902-1000000 12.5G - Updating...
LxYxvv/quora_qa
[ "task_categories:question-answering", "license:mit", "region:us" ]
2023-12-11T14:56:52+00:00
{"license": "mit", "task_categories": ["question-answering"]}
2024-02-17T06:54:41+00:00
[]
[]
TAGS #task_categories-question-answering #license-mit #region-us
# QUORA_ONE_MANY_QA This dataset is derived from URL questioning data. It is a question with multiple answers. # STATISTICS - 902-1000000 12.5G - Updating...
[ "# QUORA_ONE_MANY_QA\nThis dataset is derived from URL questioning data. It is a question with multiple answers.", "# STATISTICS\n- 902-1000000 12.5G\n- Updating..." ]
[ "TAGS\n#task_categories-question-answering #license-mit #region-us \n", "# QUORA_ONE_MANY_QA\nThis dataset is derived from URL questioning data. It is a question with multiple answers.", "# STATISTICS\n- 902-1000000 12.5G\n- Updating..." ]
[ 23, 31, 15 ]
[ "passage: TAGS\n#task_categories-question-answering #license-mit #region-us \n# QUORA_ONE_MANY_QA\nThis dataset is derived from URL questioning data. It is a question with multiple answers.# STATISTICS\n- 902-1000000 12.5G\n- Updating..." ]
65fad0b49c0f3e54453f37a0e37ebbe2a7cac76e
# 🏟️ Long Code Arena (Code Editing) This is the benchmark for Code Editing task as part of 🏟️ [Long Code Arena benchmark](https://huggingface.co/spaces/JetBrains-Research/long-code-arena). ## How-to > Temporary: While the dataset is private, if you haven't used HF Hub before, add your token via `huggingface-cli` > first: > > ``` > huggingface-cli login > ``` 1. List all the available configs via [`datasets.get_dataset_config_names`](https://huggingface.co/docs/datasets/v2.14.3/en/package_reference/loading_methods#datasets.get_dataset_config_names) and choose an appropriate one. Current configs: `commitchronicle-py-long`, `commitchronicle-py-long-labels` 2. Load the data via [`load_dataset`](https://huggingface.co/docs/datasets/v2.14.3/en/package_reference/loading_methods#datasets.load_dataset): ``` from datasets import load_dataset configuration = "TODO" # select a configuration dataset = load_dataset("JetBrains-Research/lca-code-editing", configuration, split="test") ``` Note that all the data we have is considered to be in the test split. **Note.** Working with git repositories under [`repos`](https://huggingface.co/datasets/JetBrains-Research/lca-code-editing/tree/main/repos) directory is not supported via 🤗 Datasets. Download and extract the contents of each repository. We provide a full list of files in [`paths.json`](https://huggingface.co/datasets/JetBrains-Research/lca-code-editing/blob/main/paths.json). ## Dataset Structure This dataset contains three kinds of data: * *full data* about each commit (including modifications) * metadata with quality *labels* * compressed *git repositories* ### Full data This section concerns configuration with *full data* about each commit (no `-labels` suffix). Each example has the following fields: | **Field** | **Description** | |:---------:|:-----------------------------------------:| | `repo` | Commit repository. | | `hash` | Commit hash. | | `date` | Commit date. | | `license` | Commit repository's license. | | `message` | Commit message. | | `mods` | List of file modifications from a commit. | Each file modification has the following fields: | **Field** | **Description** | |:-------------:|:-------------------------------------------------------------------------------------------------:| | `change_type` | Type of change to current file. One of: `ADD`, `COPY`, `RENAME`, `DELETE`, `MODIFY` or `UNKNOWN`. | | `old_path` | Path to file before change (might be empty). | | `new_path` | Path to file after change (might be empty). | | `diff` | `git diff` for current file. | Data point example: ``` {'hash': 'f6347ae47c872b40339d9565a9cb29da5bca8716', 'repo': 'mycroftai/mycroft-core', 'date': None, 'license': None, 'message': 'Replace hashed meta with skill_gid as identifier\nThis also removes the notion of an owner skill and all skills may update settings on the server.', 'mods': [{'change_type': 'MODIFY', 'new_path': 'mycroft/skills/settings.py', 'old_path': 'mycroft/skills/settings.py', 'diff': '@@ -216,32 +216,10 @@ class SkillSettings(dict):<...>'}]} ``` ### Labels This section concerns configuration with metadata and *labels* (with `-labels` suffix). Each example has the following fields: | **Field** | **Description** | |:---------:|:------------------------------------------------------------------:| | `repo` | Commit repository. | | `hash` | Commit hash. | | `date` | Commit date. | | `license` | Commit repository's license. | | `message` | Commit message. | | `label` | Label of current commit as a target for code editing task. | | `comment` | Comment for a label for current commit (optional, might be empty). | Labels are in 1-5 scale, where: * 1 – strong no * 2 – weak no * 3 – unsure * 4 – weak yes * 5 – strong yes Data point example: ``` {'hash': 'b9747bc011e9e9830ab147327d7aeaa8447ad2d7', 'repo': 'apache/libcloud', 'date': '20.02.2020 00:11:58', 'license': 'Apache License 2.0', 'message': 'Add new storage API methods for downloading part of an object (range\ndownload) and implement it for the S3 and local storage drivers.', 'label': 4.0, 'comment': 'might be an interesting use-case (and also quite complicated)'} ``` ### Git Repositories This section concerns [`repos`](https://huggingface.co/datasets/JetBrains-Research/lca-code-editing/tree/main/repos) directory, which stores compressed Git repositories for all the commits in this benchmark. After you download and extract it, you can work with each repository either via Git or via Python libraries like [GitPython](https://github.com/gitpython-developers/GitPython) or [PyDriller](https://github.com/ishepard/pydriller).
JetBrains-Research/lca-code-editing
[ "region:us" ]
2023-12-11T15:03:37+00:00
{"dataset_info": [{"config_name": "commitchronicle-py-long", "features": [{"name": "hash", "dtype": "string"}, {"name": "repo", "dtype": "string"}, {"name": "date", "dtype": "string"}, {"name": "license", "dtype": "string"}, {"name": "message", "dtype": "string"}, {"name": "mods", "list": [{"name": "change_type", "dtype": "string"}, {"name": "old_path", "dtype": "string"}, {"name": "new_path", "dtype": "string"}, {"name": "diff", "dtype": "string"}]}], "splits": [{"name": "test", "num_examples": 119}]}, {"config_name": "commitchronicle-py-long-labels", "features": [{"name": "hash", "dtype": "string"}, {"name": "repo", "dtype": "string"}, {"name": "date", "dtype": "string"}, {"name": "license", "dtype": "string"}, {"name": "message", "dtype": "string"}, {"name": "label", "dtype": "int8"}, {"name": "comment", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 263065, "num_examples": 858}], "download_size": 150455, "dataset_size": 263065}], "configs": [{"config_name": "commitchronicle-py-long", "data_files": [{"split": "test", "path": "commitchronicle-py-long/test-*"}]}, {"config_name": "commitchronicle-py-long-labels", "data_files": [{"split": "test", "path": "commitchronicle-py-long-labels/test-*"}]}]}
2024-01-10T15:41:44+00:00
[]
[]
TAGS #region-us
️ Long Code Arena (Code Editing) ================================ This is the benchmark for Code Editing task as part of ️ Long Code Arena benchmark. How-to ------ > > Temporary: While the dataset is private, if you haven't used HF Hub before, add your token via 'huggingface-cli' > first: > > > 1. List all the available configs via 'datasets.get\_dataset\_config\_names' and choose an appropriate one. Current configs: 'commitchronicle-py-long', 'commitchronicle-py-long-labels' 2. Load the data via 'load\_dataset': Note that all the data we have is considered to be in the test split. Note. Working with git repositories under 'repos' directory is not supported via Datasets. Download and extract the contents of each repository. We provide a full list of files in 'URL'. Dataset Structure ----------------- This dataset contains three kinds of data: * *full data* about each commit (including modifications) * metadata with quality *labels* * compressed *git repositories* ### Full data This section concerns configuration with *full data* about each commit (no '-labels' suffix). Each example has the following fields: Each file modification has the following fields: Data point example: ### Labels This section concerns configuration with metadata and *labels* (with '-labels' suffix). Each example has the following fields: Labels are in 1-5 scale, where: * 1 – strong no * 2 – weak no * 3 – unsure * 4 – weak yes * 5 – strong yes Data point example: ### Git Repositories This section concerns 'repos' directory, which stores compressed Git repositories for all the commits in this benchmark. After you download and extract it, you can work with each repository either via Git or via Python libraries like GitPython or PyDriller.
[ "### Full data\n\n\nThis section concerns configuration with *full data* about each commit (no '-labels' suffix).\n\n\nEach example has the following fields:\n\n\n\nEach file modification has the following fields:\n\n\n\nData point example:", "### Labels\n\n\nThis section concerns configuration with metadata and *labels* (with '-labels' suffix).\n\n\nEach example has the following fields:\n\n\n\nLabels are in 1-5 scale, where:\n\n\n* 1 – strong no\n* 2 – weak no\n* 3 – unsure\n* 4 – weak yes\n* 5 – strong yes\n\n\nData point example:", "### Git Repositories\n\n\nThis section concerns 'repos'\ndirectory, which stores compressed Git repositories for all the commits in this benchmark. After you download and\nextract it, you can work with each repository either via Git or via Python libraries\nlike GitPython\nor PyDriller." ]
[ "TAGS\n#region-us \n", "### Full data\n\n\nThis section concerns configuration with *full data* about each commit (no '-labels' suffix).\n\n\nEach example has the following fields:\n\n\n\nEach file modification has the following fields:\n\n\n\nData point example:", "### Labels\n\n\nThis section concerns configuration with metadata and *labels* (with '-labels' suffix).\n\n\nEach example has the following fields:\n\n\n\nLabels are in 1-5 scale, where:\n\n\n* 1 – strong no\n* 2 – weak no\n* 3 – unsure\n* 4 – weak yes\n* 5 – strong yes\n\n\nData point example:", "### Git Repositories\n\n\nThis section concerns 'repos'\ndirectory, which stores compressed Git repositories for all the commits in this benchmark. After you download and\nextract it, you can work with each repository either via Git or via Python libraries\nlike GitPython\nor PyDriller." ]
[ 6, 49, 74, 74 ]
[ "passage: TAGS\n#region-us \n### Full data\n\n\nThis section concerns configuration with *full data* about each commit (no '-labels' suffix).\n\n\nEach example has the following fields:\n\n\n\nEach file modification has the following fields:\n\n\n\nData point example:### Labels\n\n\nThis section concerns configuration with metadata and *labels* (with '-labels' suffix).\n\n\nEach example has the following fields:\n\n\n\nLabels are in 1-5 scale, where:\n\n\n* 1 – strong no\n* 2 – weak no\n* 3 – unsure\n* 4 – weak yes\n* 5 – strong yes\n\n\nData point example:### Git Repositories\n\n\nThis section concerns 'repos'\ndirectory, which stores compressed Git repositories for all the commits in this benchmark. After you download and\nextract it, you can work with each repository either via Git or via Python libraries\nlike GitPython\nor PyDriller." ]
26b11d70e8cd47e93519f0c5b9871a2ef01a241d
# Livre des procédures fiscales, non-instruct (11-12-2023) This project focuses on fine-tuning pre-trained language models to create efficient and accurate models for tax practice. Fine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach. Instruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks. Instruction-based fine-tuning significantly enhances the performance of LLMs in the following ways: - Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions. - Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs. - Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more. - Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs. - Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text. ## Dataset generation This JSON file is a list of dictionaries, each dictionary contains the following fields: - `instruction`: `string`, presenting the instruction linked to the element. - `input`: `string`, signifying the input details for the element. - `output`: `string`, indicating the output information for the element. We used the following list of instructions for generating the dataset: ```python instructions = [ "Compose l'intégralité de l'article sous forme écrite.", "Écris la totalité du contenu de l'article.", "Formule la totalité du texte présent dans l'article.", "Produis l'intégralité de l'article en écriture.", "Développe l'article dans son ensemble par écrit.", "Génère l'ensemble du texte contenu dans l'article.", "Formule le contenu intégral de l'article en entier.", "Rédige la totalité du texte de l'article en entier.", "Compose l'intégralité du contenu textuel de l'article.", "Rédige l'ensemble du texte qui constitue l'article.", "Formule l'article entier dans son contenu écrit.", "Composez l'intégralité de l'article sous forme écrite.", "Écrivez la totalité du contenu de l'article.", "Formulez la totalité du texte présent dans l'article.", "Développez l'article dans son ensemble par écrit.", "Générez l'ensemble du texte contenu dans l'article.", "Formulez le contenu intégral de l'article en entier.", "Rédigez la totalité du texte de l'article en entier.", "Composez l'intégralité du contenu textuel de l'article.", "Écrivez l'article dans son intégralité en termes de texte.", "Rédigez l'ensemble du texte qui constitue l'article.", "Formulez l'article entier dans son contenu écrit.", "Composer l'intégralité de l'article sous forme écrite.", "Écrire la totalité du contenu de l'article.", "Formuler la totalité du texte présent dans l'article.", "Produire l'intégralité de l'article en écriture.", "Développer l'article dans son ensemble par écrit.", "Générer l'ensemble du texte contenu dans l'article.", "Formuler le contenu intégral de l'article en entier.", "Rédiger la totalité du texte de l'article en entier.", "Composer l'intégralité du contenu textuel de l'article.", "Rédiger l'ensemble du texte qui constitue l'article.", "Formuler l'article entier dans son contenu écrit.", "Quelles sont les dispositions de l'article ?", "Quelles dispositions sont incluses dans l'article ?", "Quelles sont les dispositions énoncées dans l'article ?", "Quel est le texte intégral de l'article ?", "Quelle est la lettre de l'article ?" ] ``` ## Citing this project If you use this code in your research, please use the following BibTeX entry. ```BibTeX @misc{louisbrulenaudet2023, author = {Louis Brulé Naudet}, title = {Livre des procédures fiscales, non-instruct (11-12-2023)}, howpublished = {\url{https://huggingface.co/datasets/louisbrulenaudet/lpf}}, year = {2023} } ``` ## Feedback If you have any feedback, please reach out at [[email protected]](mailto:[email protected]).
louisbrulenaudet/lpf
[ "task_categories:text-generation", "task_categories:table-question-answering", "task_categories:summarization", "task_categories:conversational", "multilinguality:monolingual", "size_categories:n<1K", "source_datasets:original", "language:fr", "license:apache-2.0", "finetuning", "legal", "tax", "llm", "fiscal", "lpf", "Livre des procédures fiscales", "doi:10.57967/hf/1439", "region:us" ]
2023-12-11T15:11:08+00:00
{"language": ["fr"], "license": "apache-2.0", "multilinguality": ["monolingual"], "size_categories": ["n<1K"], "source_datasets": ["original"], "task_categories": ["text-generation", "table-question-answering", "summarization", "conversational"], "pretty_name": "Livre des proc\u00e9dures fiscales (LPF)", "tags": ["finetuning", "legal", "tax", "llm", "fiscal", "lpf", "Livre des proc\u00e9dures fiscales"]}
2023-12-11T15:12:01+00:00
[]
[ "fr" ]
TAGS #task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-n<1K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #tax #llm #fiscal #lpf #Livre des procédures fiscales #doi-10.57967/hf/1439 #region-us
# Livre des procédures fiscales, non-instruct (11-12-2023) This project focuses on fine-tuning pre-trained language models to create efficient and accurate models for tax practice. Fine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach. Instruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks. Instruction-based fine-tuning significantly enhances the performance of LLMs in the following ways: - Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions. - Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs. - Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more. - Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs. - Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text. ## Dataset generation This JSON file is a list of dictionaries, each dictionary contains the following fields: - 'instruction': 'string', presenting the instruction linked to the element. - 'input': 'string', signifying the input details for the element. - 'output': 'string', indicating the output information for the element. We used the following list of instructions for generating the dataset: ## Citing this project If you use this code in your research, please use the following BibTeX entry. ## Feedback If you have any feedback, please reach out at louisbrulenaudet@URL.
[ "# Livre des procédures fiscales, non-instruct (11-12-2023)\n\nThis project focuses on fine-tuning pre-trained language models to create efficient and accurate models for tax practice. \n\nFine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach.\n\nInstruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks.\n\nInstruction-based fine-tuning significantly enhances the performance of LLMs in the following ways:\n\n- Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions.\n- Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs.\n- Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more.\n- Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs.\n- Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text.", "## Dataset generation\n\nThis JSON file is a list of dictionaries, each dictionary contains the following fields:\n\n- 'instruction': 'string', presenting the instruction linked to the element.\n- 'input': 'string', signifying the input details for the element.\n- 'output': 'string', indicating the output information for the element.\n\nWe used the following list of instructions for generating the dataset:", "## Citing this project\n\nIf you use this code in your research, please use the following BibTeX entry.", "## Feedback\n\nIf you have any feedback, please reach out at louisbrulenaudet@URL." ]
[ "TAGS\n#task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-n<1K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #tax #llm #fiscal #lpf #Livre des procédures fiscales #doi-10.57967/hf/1439 #region-us \n", "# Livre des procédures fiscales, non-instruct (11-12-2023)\n\nThis project focuses on fine-tuning pre-trained language models to create efficient and accurate models for tax practice. \n\nFine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach.\n\nInstruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks.\n\nInstruction-based fine-tuning significantly enhances the performance of LLMs in the following ways:\n\n- Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions.\n- Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs.\n- Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more.\n- Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs.\n- Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text.", "## Dataset generation\n\nThis JSON file is a list of dictionaries, each dictionary contains the following fields:\n\n- 'instruction': 'string', presenting the instruction linked to the element.\n- 'input': 'string', signifying the input details for the element.\n- 'output': 'string', indicating the output information for the element.\n\nWe used the following list of instructions for generating the dataset:", "## Citing this project\n\nIf you use this code in your research, please use the following BibTeX entry.", "## Feedback\n\nIf you have any feedback, please reach out at louisbrulenaudet@URL." ]
[ 128, 504, 100, 24, 21 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-n<1K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #tax #llm #fiscal #lpf #Livre des procédures fiscales #doi-10.57967/hf/1439 #region-us \n" ]
d5628f045420f213017b5b71deff77f1406fdbee
# Code Général des Impôts, non-instruct (11-12-2023) This project focuses on fine-tuning pre-trained language models to create efficient and accurate models for tax practice. Fine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach. Instruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks. Instruction-based fine-tuning significantly enhances the performance of LLMs in the following ways: - Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions. - Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs. - Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more. - Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs. - Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text. ## Dataset generation This JSON file is a list of dictionaries, each dictionary contains the following fields: - `instruction`: `string`, presenting the instruction linked to the element. - `input`: `string`, signifying the input details for the element. - `output`: `string`, indicating the output information for the element. We used the following list of instructions for generating the dataset: ```python instructions = [ "Compose l'intégralité de l'article sous forme écrite.", "Écris la totalité du contenu de l'article.", "Formule la totalité du texte présent dans l'article.", "Produis l'intégralité de l'article en écriture.", "Développe l'article dans son ensemble par écrit.", "Génère l'ensemble du texte contenu dans l'article.", "Formule le contenu intégral de l'article en entier.", "Rédige la totalité du texte de l'article en entier.", "Compose l'intégralité du contenu textuel de l'article.", "Rédige l'ensemble du texte qui constitue l'article.", "Formule l'article entier dans son contenu écrit.", "Composez l'intégralité de l'article sous forme écrite.", "Écrivez la totalité du contenu de l'article.", "Formulez la totalité du texte présent dans l'article.", "Développez l'article dans son ensemble par écrit.", "Générez l'ensemble du texte contenu dans l'article.", "Formulez le contenu intégral de l'article en entier.", "Rédigez la totalité du texte de l'article en entier.", "Composez l'intégralité du contenu textuel de l'article.", "Écrivez l'article dans son intégralité en termes de texte.", "Rédigez l'ensemble du texte qui constitue l'article.", "Formulez l'article entier dans son contenu écrit.", "Composer l'intégralité de l'article sous forme écrite.", "Écrire la totalité du contenu de l'article.", "Formuler la totalité du texte présent dans l'article.", "Produire l'intégralité de l'article en écriture.", "Développer l'article dans son ensemble par écrit.", "Générer l'ensemble du texte contenu dans l'article.", "Formuler le contenu intégral de l'article en entier.", "Rédiger la totalité du texte de l'article en entier.", "Composer l'intégralité du contenu textuel de l'article.", "Rédiger l'ensemble du texte qui constitue l'article.", "Formuler l'article entier dans son contenu écrit.", "Quelles sont les dispositions de l'article ?", "Quelles dispositions sont incluses dans l'article ?", "Quelles sont les dispositions énoncées dans l'article ?", "Quel est le texte intégral de l'article ?", "Quelle est la lettre de l'article ?" ] ``` ## Citing this project If you use this code in your research, please use the following BibTeX entry. ```BibTeX @misc{louisbrulenaudet2023, author = {Louis Brulé Naudet}, title = {Code Général des Impôts, non-instruct (11-12-2023)}, howpublished = {\url{https://huggingface.co/datasets/louisbrulenaudet/cgi}}, year = {2023} } ``` ## Feedback If you have any feedback, please reach out at [[email protected]](mailto:[email protected]).
louisbrulenaudet/cgi
[ "task_categories:text-generation", "task_categories:table-question-answering", "task_categories:summarization", "task_categories:conversational", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:fr", "license:apache-2.0", "finetuning", "legal", "tax", "llm", "fiscal", "cgi", "Code Général des Impôts", "doi:10.57967/hf/1438", "region:us" ]
2023-12-11T15:12:43+00:00
{"language": ["fr"], "license": "apache-2.0", "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": ["original"], "task_categories": ["text-generation", "table-question-answering", "summarization", "conversational"], "pretty_name": "Code G\u00e9n\u00e9ral des Imp\u00f4ts (CGI)", "tags": ["finetuning", "legal", "tax", "llm", "fiscal", "cgi", "Code G\u00e9n\u00e9ral des Imp\u00f4ts"]}
2023-12-11T15:13:33+00:00
[]
[ "fr" ]
TAGS #task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #tax #llm #fiscal #cgi #Code Général des Impôts #doi-10.57967/hf/1438 #region-us
# Code Général des Impôts, non-instruct (11-12-2023) This project focuses on fine-tuning pre-trained language models to create efficient and accurate models for tax practice. Fine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach. Instruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks. Instruction-based fine-tuning significantly enhances the performance of LLMs in the following ways: - Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions. - Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs. - Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more. - Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs. - Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text. ## Dataset generation This JSON file is a list of dictionaries, each dictionary contains the following fields: - 'instruction': 'string', presenting the instruction linked to the element. - 'input': 'string', signifying the input details for the element. - 'output': 'string', indicating the output information for the element. We used the following list of instructions for generating the dataset: ## Citing this project If you use this code in your research, please use the following BibTeX entry. ## Feedback If you have any feedback, please reach out at louisbrulenaudet@URL.
[ "# Code Général des Impôts, non-instruct (11-12-2023)\n\nThis project focuses on fine-tuning pre-trained language models to create efficient and accurate models for tax practice. \n\nFine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach.\n\nInstruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks.\n\nInstruction-based fine-tuning significantly enhances the performance of LLMs in the following ways:\n\n- Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions.\n- Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs.\n- Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more.\n- Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs.\n- Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text.", "## Dataset generation\n\nThis JSON file is a list of dictionaries, each dictionary contains the following fields:\n\n- 'instruction': 'string', presenting the instruction linked to the element.\n- 'input': 'string', signifying the input details for the element.\n- 'output': 'string', indicating the output information for the element.\n\nWe used the following list of instructions for generating the dataset:", "## Citing this project\n\nIf you use this code in your research, please use the following BibTeX entry.", "## Feedback\n\nIf you have any feedback, please reach out at louisbrulenaudet@URL." ]
[ "TAGS\n#task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #tax #llm #fiscal #cgi #Code Général des Impôts #doi-10.57967/hf/1438 #region-us \n", "# Code Général des Impôts, non-instruct (11-12-2023)\n\nThis project focuses on fine-tuning pre-trained language models to create efficient and accurate models for tax practice. \n\nFine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach.\n\nInstruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks.\n\nInstruction-based fine-tuning significantly enhances the performance of LLMs in the following ways:\n\n- Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions.\n- Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs.\n- Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more.\n- Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs.\n- Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text.", "## Dataset generation\n\nThis JSON file is a list of dictionaries, each dictionary contains the following fields:\n\n- 'instruction': 'string', presenting the instruction linked to the element.\n- 'input': 'string', signifying the input details for the element.\n- 'output': 'string', indicating the output information for the element.\n\nWe used the following list of instructions for generating the dataset:", "## Citing this project\n\nIf you use this code in your research, please use the following BibTeX entry.", "## Feedback\n\nIf you have any feedback, please reach out at louisbrulenaudet@URL." ]
[ 129, 504, 100, 24, 21 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #tax #llm #fiscal #cgi #Code Général des Impôts #doi-10.57967/hf/1438 #region-us \n" ]
46b2b8ca5fe89ef9776bbb2673c934f101f801b8
## Truthy DPO This is a dataset designed to enhance the overall truthfulness of LLMs, without sacrificing immersion when roleplaying as a human. For example, in normal AI assistant model, the model should not try to describe what the warmth of the sun feels like, but if the system prompt indicates it's a human, it should. Mostly targets corporeal, spacial, temporal awareness, and common misconceptions. ### Contribute If you're interested in new functionality/datasets, take a look at [bagel repo](https://github.com/jondurbin/bagel) and [airoboros](https://github.com/jondurbin/airoboros) and either make a PR or open an issue with details. To help me with the fine-tuning costs, dataset generation, etc., please use one of the following: - https://bmc.link/jondurbin - ETH 0xce914eAFC2fe52FdceE59565Dd92c06f776fcb11 - BTC bc1qdwuth4vlg8x37ggntlxu5cjfwgmdy5zaa7pswf
jondurbin/truthy-dpo-v0.1
[ "license:cc-by-4.0", "region:us" ]
2023-12-11T15:34:04+00:00
{"license": "cc-by-4.0"}
2024-01-11T10:19:14+00:00
[]
[]
TAGS #license-cc-by-4.0 #region-us
## Truthy DPO This is a dataset designed to enhance the overall truthfulness of LLMs, without sacrificing immersion when roleplaying as a human. For example, in normal AI assistant model, the model should not try to describe what the warmth of the sun feels like, but if the system prompt indicates it's a human, it should. Mostly targets corporeal, spacial, temporal awareness, and common misconceptions. ### Contribute If you're interested in new functionality/datasets, take a look at bagel repo and airoboros and either make a PR or open an issue with details. To help me with the fine-tuning costs, dataset generation, etc., please use one of the following: - URL - ETH 0xce914eAFC2fe52FdceE59565Dd92c06f776fcb11 - BTC bc1qdwuth4vlg8x37ggntlxu5cjfwgmdy5zaa7pswf
[ "## Truthy DPO\n\nThis is a dataset designed to enhance the overall truthfulness of LLMs, without sacrificing immersion when roleplaying as a human.\n\nFor example, in normal AI assistant model, the model should not try to describe what the warmth of the sun feels like, but if the system prompt indicates it's a human, it should.\n\nMostly targets corporeal, spacial, temporal awareness, and common misconceptions.", "### Contribute\n\nIf you're interested in new functionality/datasets, take a look at bagel repo and airoboros and either make a PR or open an issue with details.\n\nTo help me with the fine-tuning costs, dataset generation, etc., please use one of the following:\n\n- URL\n- ETH 0xce914eAFC2fe52FdceE59565Dd92c06f776fcb11\n- BTC bc1qdwuth4vlg8x37ggntlxu5cjfwgmdy5zaa7pswf" ]
[ "TAGS\n#license-cc-by-4.0 #region-us \n", "## Truthy DPO\n\nThis is a dataset designed to enhance the overall truthfulness of LLMs, without sacrificing immersion when roleplaying as a human.\n\nFor example, in normal AI assistant model, the model should not try to describe what the warmth of the sun feels like, but if the system prompt indicates it's a human, it should.\n\nMostly targets corporeal, spacial, temporal awareness, and common misconceptions.", "### Contribute\n\nIf you're interested in new functionality/datasets, take a look at bagel repo and airoboros and either make a PR or open an issue with details.\n\nTo help me with the fine-tuning costs, dataset generation, etc., please use one of the following:\n\n- URL\n- ETH 0xce914eAFC2fe52FdceE59565Dd92c06f776fcb11\n- BTC bc1qdwuth4vlg8x37ggntlxu5cjfwgmdy5zaa7pswf" ]
[ 15, 99, 131 ]
[ "passage: TAGS\n#license-cc-by-4.0 #region-us \n## Truthy DPO\n\nThis is a dataset designed to enhance the overall truthfulness of LLMs, without sacrificing immersion when roleplaying as a human.\n\nFor example, in normal AI assistant model, the model should not try to describe what the warmth of the sun feels like, but if the system prompt indicates it's a human, it should.\n\nMostly targets corporeal, spacial, temporal awareness, and common misconceptions.### Contribute\n\nIf you're interested in new functionality/datasets, take a look at bagel repo and airoboros and either make a PR or open an issue with details.\n\nTo help me with the fine-tuning costs, dataset generation, etc., please use one of the following:\n\n- URL\n- ETH 0xce914eAFC2fe52FdceE59565Dd92c06f776fcb11\n- BTC bc1qdwuth4vlg8x37ggntlxu5cjfwgmdy5zaa7pswf" ]
92712e086d4099c1f05be6a6e02439bf2f2d9f2f
## Toxic-DPO This is a highly toxic, "harmful" dataset meant to illustrate how DPO can be used to de-censor/unalign a model quite easily using direct-preference-optimization (DPO) using very few examples. Most of the examples still contain some amount of warnings/disclaimers, so it's still somewhat editorialized. ## Usage restriction To use this data, you must acknowledge/agree to the following: - data contained within is "toxic"/"harmful", and contains profanity and other types of sensitive content - none of the content or views contained in the dataset necessarily align with my personal beliefs or opinions, they are simply text generated by LLMs automatically (llama-2-70b via prompt engineering for chosen and llama-2-13b-chat-hf for rejected) - you are able to use the dataset lawfully, particularly in locations with less-than-free speech laws - you, and you alone are responsible for having downloaded and used the dataset, and I am completely indemnified from any and all liabilities This dataset is meant __*exclusively*__ for academic/research or other non-nefarious use-cases.
unalignment/toxic-dpo-v0.1
[ "license:cc-by-4.0", "not-for-all-audiences", "region:us" ]
2023-12-11T15:51:16+00:00
{"license": "cc-by-4.0", "tags": ["not-for-all-audiences"]}
2023-12-26T18:08:07+00:00
[]
[]
TAGS #license-cc-by-4.0 #not-for-all-audiences #region-us
## Toxic-DPO This is a highly toxic, "harmful" dataset meant to illustrate how DPO can be used to de-censor/unalign a model quite easily using direct-preference-optimization (DPO) using very few examples. Most of the examples still contain some amount of warnings/disclaimers, so it's still somewhat editorialized. ## Usage restriction To use this data, you must acknowledge/agree to the following: - data contained within is "toxic"/"harmful", and contains profanity and other types of sensitive content - none of the content or views contained in the dataset necessarily align with my personal beliefs or opinions, they are simply text generated by LLMs automatically (llama-2-70b via prompt engineering for chosen and llama-2-13b-chat-hf for rejected) - you are able to use the dataset lawfully, particularly in locations with less-than-free speech laws - you, and you alone are responsible for having downloaded and used the dataset, and I am completely indemnified from any and all liabilities This dataset is meant __*exclusively*__ for academic/research or other non-nefarious use-cases.
[ "## Toxic-DPO\n\nThis is a highly toxic, \"harmful\" dataset meant to illustrate how DPO can be used to de-censor/unalign a model quite easily using direct-preference-optimization (DPO) using very few examples.\n\nMost of the examples still contain some amount of warnings/disclaimers, so it's still somewhat editorialized.", "## Usage restriction\n\nTo use this data, you must acknowledge/agree to the following:\n- data contained within is \"toxic\"/\"harmful\", and contains profanity and other types of sensitive content\n- none of the content or views contained in the dataset necessarily align with my personal beliefs or opinions, they are simply text generated by LLMs automatically (llama-2-70b via prompt engineering for chosen and llama-2-13b-chat-hf for rejected)\n- you are able to use the dataset lawfully, particularly in locations with less-than-free speech laws\n- you, and you alone are responsible for having downloaded and used the dataset, and I am completely indemnified from any and all liabilities\n\nThis dataset is meant __*exclusively*__ for academic/research or other non-nefarious use-cases." ]
[ "TAGS\n#license-cc-by-4.0 #not-for-all-audiences #region-us \n", "## Toxic-DPO\n\nThis is a highly toxic, \"harmful\" dataset meant to illustrate how DPO can be used to de-censor/unalign a model quite easily using direct-preference-optimization (DPO) using very few examples.\n\nMost of the examples still contain some amount of warnings/disclaimers, so it's still somewhat editorialized.", "## Usage restriction\n\nTo use this data, you must acknowledge/agree to the following:\n- data contained within is \"toxic\"/\"harmful\", and contains profanity and other types of sensitive content\n- none of the content or views contained in the dataset necessarily align with my personal beliefs or opinions, they are simply text generated by LLMs automatically (llama-2-70b via prompt engineering for chosen and llama-2-13b-chat-hf for rejected)\n- you are able to use the dataset lawfully, particularly in locations with less-than-free speech laws\n- you, and you alone are responsible for having downloaded and used the dataset, and I am completely indemnified from any and all liabilities\n\nThis dataset is meant __*exclusively*__ for academic/research or other non-nefarious use-cases." ]
[ 24, 86, 196 ]
[ "passage: TAGS\n#license-cc-by-4.0 #not-for-all-audiences #region-us \n## Toxic-DPO\n\nThis is a highly toxic, \"harmful\" dataset meant to illustrate how DPO can be used to de-censor/unalign a model quite easily using direct-preference-optimization (DPO) using very few examples.\n\nMost of the examples still contain some amount of warnings/disclaimers, so it's still somewhat editorialized.## Usage restriction\n\nTo use this data, you must acknowledge/agree to the following:\n- data contained within is \"toxic\"/\"harmful\", and contains profanity and other types of sensitive content\n- none of the content or views contained in the dataset necessarily align with my personal beliefs or opinions, they are simply text generated by LLMs automatically (llama-2-70b via prompt engineering for chosen and llama-2-13b-chat-hf for rejected)\n- you are able to use the dataset lawfully, particularly in locations with less-than-free speech laws\n- you, and you alone are responsible for having downloaded and used the dataset, and I am completely indemnified from any and all liabilities\n\nThis dataset is meant __*exclusively*__ for academic/research or other non-nefarious use-cases." ]
9ccdd2aa175ba4b2e23824bf1526faa5f0eb209a
# Dataset Card for "bash_images_2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
SpongeBash/bash_images_2
[ "region:us" ]
2023-12-11T16:03:41+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 146725.0, "num_examples": 12}], "download_size": 148375, "dataset_size": 146725.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-12-11T16:03:43+00:00
[]
[]
TAGS #region-us
# Dataset Card for "bash_images_2" More Information needed
[ "# Dataset Card for \"bash_images_2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"bash_images_2\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"bash_images_2\"\n\nMore Information needed" ]
58cdc294fe2b5356afca2770ec921fd88b31c397
# Dataset Card for Evaluation run of Deci/DeciLM-7B-instruct ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/Deci/DeciLM-7B-instruct - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** [email protected] ### Dataset Summary Dataset automatically created during the evaluation run of model [Deci/DeciLM-7B-instruct](https://huggingface.co/Deci/DeciLM-7B-instruct) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_Deci__DeciLM-7B-instruct", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-11T16:04:45.894054](https://huggingface.co/datasets/open-llm-leaderboard/details_Deci__DeciLM-7B-instruct/blob/main/results_2023-12-11T16-04-45.894054.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.6031249566837412, "acc_stderr": 0.033116595870832835, "acc_norm": 0.6063162588110805, "acc_norm_stderr": 0.03379377067263216, "mc1": 0.34761321909424725, "mc1_stderr": 0.016670769188897303, "mc2": 0.49753650846026637, "mc2_stderr": 0.015257351147769172 }, "harness|arc:challenge|25": { "acc": 0.5665529010238908, "acc_stderr": 0.014481376224558902, "acc_norm": 0.6100682593856656, "acc_norm_stderr": 0.014252959848892896 }, "harness|hellaswag|10": { "acc": 0.6278629755028878, "acc_stderr": 0.004823867761332466, "acc_norm": 0.8237402907787293, "acc_norm_stderr": 0.0038026223415290146 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.22, "acc_stderr": 0.0416333199893227, "acc_norm": 0.22, "acc_norm_stderr": 0.0416333199893227 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.5777777777777777, "acc_stderr": 0.042667634040995814, "acc_norm": 0.5777777777777777, "acc_norm_stderr": 0.042667634040995814 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.7039473684210527, "acc_stderr": 0.037150621549989056, "acc_norm": 0.7039473684210527, "acc_norm_stderr": 0.037150621549989056 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.53, "acc_stderr": 0.05016135580465919, "acc_norm": 0.53, "acc_norm_stderr": 0.05016135580465919 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.6377358490566037, "acc_stderr": 0.0295822451283843, "acc_norm": 0.6377358490566037, "acc_norm_stderr": 0.0295822451283843 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.6875, "acc_stderr": 0.038760854559127644, "acc_norm": 0.6875, "acc_norm_stderr": 0.038760854559127644 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.45, "acc_stderr": 0.04999999999999999, "acc_norm": 0.45, "acc_norm_stderr": 0.04999999999999999 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.49, "acc_stderr": 0.05024183937956912, "acc_norm": 0.49, "acc_norm_stderr": 0.05024183937956912 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.39, "acc_stderr": 0.04902071300001975, "acc_norm": 0.39, "acc_norm_stderr": 0.04902071300001975 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.6127167630057804, "acc_stderr": 0.03714325906302065, "acc_norm": 0.6127167630057804, "acc_norm_stderr": 0.03714325906302065 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.39215686274509803, "acc_stderr": 0.048580835742663454, "acc_norm": 0.39215686274509803, "acc_norm_stderr": 0.048580835742663454 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.77, "acc_stderr": 0.042295258468165065, "acc_norm": 0.77, "acc_norm_stderr": 0.042295258468165065 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.548936170212766, "acc_stderr": 0.032529096196131965, "acc_norm": 0.548936170212766, "acc_norm_stderr": 0.032529096196131965 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.4473684210526316, "acc_stderr": 0.046774730044911984, "acc_norm": 0.4473684210526316, "acc_norm_stderr": 0.046774730044911984 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.5310344827586206, "acc_stderr": 0.04158632762097828, "acc_norm": 0.5310344827586206, "acc_norm_stderr": 0.04158632762097828 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.4074074074074074, "acc_stderr": 0.025305906241590632, "acc_norm": 0.4074074074074074, "acc_norm_stderr": 0.025305906241590632 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.36507936507936506, "acc_stderr": 0.04306241259127153, "acc_norm": 0.36507936507936506, "acc_norm_stderr": 0.04306241259127153 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.32, "acc_stderr": 0.046882617226215034, "acc_norm": 0.32, "acc_norm_stderr": 0.046882617226215034 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.7290322580645161, "acc_stderr": 0.025284416114900156, "acc_norm": 0.7290322580645161, "acc_norm_stderr": 0.025284416114900156 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.4433497536945813, "acc_stderr": 0.03495334582162934, "acc_norm": 0.4433497536945813, "acc_norm_stderr": 0.03495334582162934 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.67, "acc_stderr": 0.04725815626252607, "acc_norm": 0.67, "acc_norm_stderr": 0.04725815626252607 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.7454545454545455, "acc_stderr": 0.03401506715249039, "acc_norm": 0.7454545454545455, "acc_norm_stderr": 0.03401506715249039 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.7121212121212122, "acc_stderr": 0.03225883512300992, "acc_norm": 0.7121212121212122, "acc_norm_stderr": 0.03225883512300992 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.8341968911917098, "acc_stderr": 0.026839845022314415, "acc_norm": 0.8341968911917098, "acc_norm_stderr": 0.026839845022314415 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.6076923076923076, "acc_stderr": 0.02475600038213095, "acc_norm": 0.6076923076923076, "acc_norm_stderr": 0.02475600038213095 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.37407407407407406, "acc_stderr": 0.02950286112895529, "acc_norm": 0.37407407407407406, "acc_norm_stderr": 0.02950286112895529 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.6260504201680672, "acc_stderr": 0.03142946637883708, "acc_norm": 0.6260504201680672, "acc_norm_stderr": 0.03142946637883708 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.3576158940397351, "acc_stderr": 0.03913453431177258, "acc_norm": 0.3576158940397351, "acc_norm_stderr": 0.03913453431177258 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.7926605504587156, "acc_stderr": 0.01738141556360868, "acc_norm": 0.7926605504587156, "acc_norm_stderr": 0.01738141556360868 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.48148148148148145, "acc_stderr": 0.034076320938540516, "acc_norm": 0.48148148148148145, "acc_norm_stderr": 0.034076320938540516 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.7843137254901961, "acc_stderr": 0.028867431449849313, "acc_norm": 0.7843137254901961, "acc_norm_stderr": 0.028867431449849313 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.7932489451476793, "acc_stderr": 0.0263616516683891, "acc_norm": 0.7932489451476793, "acc_norm_stderr": 0.0263616516683891 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.6816143497757847, "acc_stderr": 0.03126580522513713, "acc_norm": 0.6816143497757847, "acc_norm_stderr": 0.03126580522513713 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.6946564885496184, "acc_stderr": 0.040393149787245605, "acc_norm": 0.6946564885496184, "acc_norm_stderr": 0.040393149787245605 }, "harness|hendrycksTest-international_law|5": { "acc": 0.8016528925619835, "acc_stderr": 0.03640118271990946, "acc_norm": 0.8016528925619835, "acc_norm_stderr": 0.03640118271990946 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.7129629629629629, "acc_stderr": 0.043733130409147614, "acc_norm": 0.7129629629629629, "acc_norm_stderr": 0.043733130409147614 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.7055214723926381, "acc_stderr": 0.03581165790474082, "acc_norm": 0.7055214723926381, "acc_norm_stderr": 0.03581165790474082 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.41964285714285715, "acc_stderr": 0.04684099321077106, "acc_norm": 0.41964285714285715, "acc_norm_stderr": 0.04684099321077106 }, "harness|hendrycksTest-management|5": { "acc": 0.7281553398058253, "acc_stderr": 0.044052680241409216, "acc_norm": 0.7281553398058253, "acc_norm_stderr": 0.044052680241409216 }, "harness|hendrycksTest-marketing|5": { "acc": 0.8717948717948718, "acc_stderr": 0.02190190511507333, "acc_norm": 0.8717948717948718, "acc_norm_stderr": 0.02190190511507333 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.66, "acc_stderr": 0.04760952285695237, "acc_norm": 0.66, "acc_norm_stderr": 0.04760952285695237 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.7931034482758621, "acc_stderr": 0.014485656041669176, "acc_norm": 0.7931034482758621, "acc_norm_stderr": 0.014485656041669176 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.6705202312138728, "acc_stderr": 0.025305258131879713, "acc_norm": 0.6705202312138728, "acc_norm_stderr": 0.025305258131879713 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.33631284916201115, "acc_stderr": 0.015801003729145887, "acc_norm": 0.33631284916201115, "acc_norm_stderr": 0.015801003729145887 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.6666666666666666, "acc_stderr": 0.02699254433929724, "acc_norm": 0.6666666666666666, "acc_norm_stderr": 0.02699254433929724 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.6334405144694534, "acc_stderr": 0.027368078243971625, "acc_norm": 0.6334405144694534, "acc_norm_stderr": 0.027368078243971625 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.6851851851851852, "acc_stderr": 0.02584224870090217, "acc_norm": 0.6851851851851852, "acc_norm_stderr": 0.02584224870090217 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.46099290780141844, "acc_stderr": 0.02973659252642444, "acc_norm": 0.46099290780141844, "acc_norm_stderr": 0.02973659252642444 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.4335071707953064, "acc_stderr": 0.012656810383983969, "acc_norm": 0.4335071707953064, "acc_norm_stderr": 0.012656810383983969 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.5625, "acc_stderr": 0.030134614954403924, "acc_norm": 0.5625, "acc_norm_stderr": 0.030134614954403924 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.6127450980392157, "acc_stderr": 0.019706875804085627, "acc_norm": 0.6127450980392157, "acc_norm_stderr": 0.019706875804085627 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.5909090909090909, "acc_stderr": 0.04709306978661895, "acc_norm": 0.5909090909090909, "acc_norm_stderr": 0.04709306978661895 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.7306122448979592, "acc_stderr": 0.02840125202902294, "acc_norm": 0.7306122448979592, "acc_norm_stderr": 0.02840125202902294 }, "harness|hendrycksTest-sociology|5": { "acc": 0.8159203980099502, "acc_stderr": 0.027403859410786838, "acc_norm": 0.8159203980099502, "acc_norm_stderr": 0.027403859410786838 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.79, "acc_stderr": 0.040936018074033256, "acc_norm": 0.79, "acc_norm_stderr": 0.040936018074033256 }, "harness|hendrycksTest-virology|5": { "acc": 0.463855421686747, "acc_stderr": 0.03882310850890594, "acc_norm": 0.463855421686747, "acc_norm_stderr": 0.03882310850890594 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.783625730994152, "acc_stderr": 0.031581495393387324, "acc_norm": 0.783625730994152, "acc_norm_stderr": 0.031581495393387324 }, "harness|truthfulqa:mc|0": { "mc1": 0.34761321909424725, "mc1_stderr": 0.016670769188897303, "mc2": 0.49753650846026637, "mc2_stderr": 0.015257351147769172 }, "harness|winogrande|5": { "acc": 0.7971586424625099, "acc_stderr": 0.011301439925936643 }, "harness|gsm8k|5": { "acc": 0.46019711902956784, "acc_stderr": 0.013728776714099363 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
open-llm-leaderboard/details_Deci__DeciLM-7B-instruct
[ "region:us" ]
2023-12-11T16:07:38+00:00
{"pretty_name": "Evaluation run of Deci/DeciLM-7B-instruct", "dataset_summary": "Dataset automatically created during the evaluation run of model [Deci/DeciLM-7B-instruct](https://huggingface.co/Deci/DeciLM-7B-instruct) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_Deci-early-access__DeciLM-7B-instruct-early_private\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-11T16:04:45.894054](https://huggingface.co/datasets/open-llm-leaderboard/details_Deci-early-access__DeciLM-7B-instruct-early_private/blob/main/results_2023-12-11T16-04-45.894054.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.6031249566837412,\n \"acc_stderr\": 0.033116595870832835,\n \"acc_norm\": 0.6063162588110805,\n \"acc_norm_stderr\": 0.03379377067263216,\n \"mc1\": 0.34761321909424725,\n \"mc1_stderr\": 0.016670769188897303,\n \"mc2\": 0.49753650846026637,\n \"mc2_stderr\": 0.015257351147769172\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.5665529010238908,\n \"acc_stderr\": 0.014481376224558902,\n \"acc_norm\": 0.6100682593856656,\n \"acc_norm_stderr\": 0.014252959848892896\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6278629755028878,\n \"acc_stderr\": 0.004823867761332466,\n \"acc_norm\": 0.8237402907787293,\n \"acc_norm_stderr\": 0.0038026223415290146\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.22,\n \"acc_stderr\": 0.0416333199893227,\n \"acc_norm\": 0.22,\n \"acc_norm_stderr\": 0.0416333199893227\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.5777777777777777,\n \"acc_stderr\": 0.042667634040995814,\n \"acc_norm\": 0.5777777777777777,\n \"acc_norm_stderr\": 0.042667634040995814\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.7039473684210527,\n \"acc_stderr\": 0.037150621549989056,\n \"acc_norm\": 0.7039473684210527,\n \"acc_norm_stderr\": 0.037150621549989056\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.53,\n \"acc_stderr\": 0.05016135580465919,\n \"acc_norm\": 0.53,\n \"acc_norm_stderr\": 0.05016135580465919\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.6377358490566037,\n \"acc_stderr\": 0.0295822451283843,\n \"acc_norm\": 0.6377358490566037,\n \"acc_norm_stderr\": 0.0295822451283843\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.6875,\n \"acc_stderr\": 0.038760854559127644,\n \"acc_norm\": 0.6875,\n \"acc_norm_stderr\": 0.038760854559127644\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.45,\n \"acc_stderr\": 0.04999999999999999,\n \"acc_norm\": 0.45,\n \"acc_norm_stderr\": 0.04999999999999999\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.49,\n \"acc_stderr\": 0.05024183937956912,\n \"acc_norm\": 0.49,\n \"acc_norm_stderr\": 0.05024183937956912\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.39,\n \"acc_stderr\": 0.04902071300001975,\n \"acc_norm\": 0.39,\n \"acc_norm_stderr\": 0.04902071300001975\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.6127167630057804,\n \"acc_stderr\": 0.03714325906302065,\n \"acc_norm\": 0.6127167630057804,\n \"acc_norm_stderr\": 0.03714325906302065\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.39215686274509803,\n \"acc_stderr\": 0.048580835742663454,\n \"acc_norm\": 0.39215686274509803,\n \"acc_norm_stderr\": 0.048580835742663454\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.77,\n \"acc_stderr\": 0.042295258468165065,\n \"acc_norm\": 0.77,\n \"acc_norm_stderr\": 0.042295258468165065\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.548936170212766,\n \"acc_stderr\": 0.032529096196131965,\n \"acc_norm\": 0.548936170212766,\n \"acc_norm_stderr\": 0.032529096196131965\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.4473684210526316,\n \"acc_stderr\": 0.046774730044911984,\n \"acc_norm\": 0.4473684210526316,\n \"acc_norm_stderr\": 0.046774730044911984\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.5310344827586206,\n \"acc_stderr\": 0.04158632762097828,\n \"acc_norm\": 0.5310344827586206,\n \"acc_norm_stderr\": 0.04158632762097828\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.4074074074074074,\n \"acc_stderr\": 0.025305906241590632,\n \"acc_norm\": 0.4074074074074074,\n \"acc_norm_stderr\": 0.025305906241590632\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.36507936507936506,\n \"acc_stderr\": 0.04306241259127153,\n \"acc_norm\": 0.36507936507936506,\n \"acc_norm_stderr\": 0.04306241259127153\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.32,\n \"acc_stderr\": 0.046882617226215034,\n \"acc_norm\": 0.32,\n \"acc_norm_stderr\": 0.046882617226215034\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.7290322580645161,\n \"acc_stderr\": 0.025284416114900156,\n \"acc_norm\": 0.7290322580645161,\n \"acc_norm_stderr\": 0.025284416114900156\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.4433497536945813,\n \"acc_stderr\": 0.03495334582162934,\n \"acc_norm\": 0.4433497536945813,\n \"acc_norm_stderr\": 0.03495334582162934\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.67,\n \"acc_stderr\": 0.04725815626252607,\n \"acc_norm\": 0.67,\n \"acc_norm_stderr\": 0.04725815626252607\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.7454545454545455,\n \"acc_stderr\": 0.03401506715249039,\n \"acc_norm\": 0.7454545454545455,\n \"acc_norm_stderr\": 0.03401506715249039\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.7121212121212122,\n \"acc_stderr\": 0.03225883512300992,\n \"acc_norm\": 0.7121212121212122,\n \"acc_norm_stderr\": 0.03225883512300992\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.8341968911917098,\n \"acc_stderr\": 0.026839845022314415,\n \"acc_norm\": 0.8341968911917098,\n \"acc_norm_stderr\": 0.026839845022314415\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.6076923076923076,\n \"acc_stderr\": 0.02475600038213095,\n \"acc_norm\": 0.6076923076923076,\n \"acc_norm_stderr\": 0.02475600038213095\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.37407407407407406,\n \"acc_stderr\": 0.02950286112895529,\n \"acc_norm\": 0.37407407407407406,\n \"acc_norm_stderr\": 0.02950286112895529\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.6260504201680672,\n \"acc_stderr\": 0.03142946637883708,\n \"acc_norm\": 0.6260504201680672,\n \"acc_norm_stderr\": 0.03142946637883708\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.3576158940397351,\n \"acc_stderr\": 0.03913453431177258,\n \"acc_norm\": 0.3576158940397351,\n \"acc_norm_stderr\": 0.03913453431177258\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.7926605504587156,\n \"acc_stderr\": 0.01738141556360868,\n \"acc_norm\": 0.7926605504587156,\n \"acc_norm_stderr\": 0.01738141556360868\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.48148148148148145,\n \"acc_stderr\": 0.034076320938540516,\n \"acc_norm\": 0.48148148148148145,\n \"acc_norm_stderr\": 0.034076320938540516\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.7843137254901961,\n \"acc_stderr\": 0.028867431449849313,\n \"acc_norm\": 0.7843137254901961,\n \"acc_norm_stderr\": 0.028867431449849313\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.7932489451476793,\n \"acc_stderr\": 0.0263616516683891,\n \"acc_norm\": 0.7932489451476793,\n \"acc_norm_stderr\": 0.0263616516683891\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.6816143497757847,\n \"acc_stderr\": 0.03126580522513713,\n \"acc_norm\": 0.6816143497757847,\n \"acc_norm_stderr\": 0.03126580522513713\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.6946564885496184,\n \"acc_stderr\": 0.040393149787245605,\n \"acc_norm\": 0.6946564885496184,\n \"acc_norm_stderr\": 0.040393149787245605\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.8016528925619835,\n \"acc_stderr\": 0.03640118271990946,\n \"acc_norm\": 0.8016528925619835,\n \"acc_norm_stderr\": 0.03640118271990946\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.7129629629629629,\n \"acc_stderr\": 0.043733130409147614,\n \"acc_norm\": 0.7129629629629629,\n \"acc_norm_stderr\": 0.043733130409147614\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.7055214723926381,\n \"acc_stderr\": 0.03581165790474082,\n \"acc_norm\": 0.7055214723926381,\n \"acc_norm_stderr\": 0.03581165790474082\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.41964285714285715,\n \"acc_stderr\": 0.04684099321077106,\n \"acc_norm\": 0.41964285714285715,\n \"acc_norm_stderr\": 0.04684099321077106\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.7281553398058253,\n \"acc_stderr\": 0.044052680241409216,\n \"acc_norm\": 0.7281553398058253,\n \"acc_norm_stderr\": 0.044052680241409216\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.8717948717948718,\n \"acc_stderr\": 0.02190190511507333,\n \"acc_norm\": 0.8717948717948718,\n \"acc_norm_stderr\": 0.02190190511507333\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.66,\n \"acc_stderr\": 0.04760952285695237,\n \"acc_norm\": 0.66,\n \"acc_norm_stderr\": 0.04760952285695237\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.7931034482758621,\n \"acc_stderr\": 0.014485656041669176,\n \"acc_norm\": 0.7931034482758621,\n \"acc_norm_stderr\": 0.014485656041669176\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.6705202312138728,\n \"acc_stderr\": 0.025305258131879713,\n \"acc_norm\": 0.6705202312138728,\n \"acc_norm_stderr\": 0.025305258131879713\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.33631284916201115,\n \"acc_stderr\": 0.015801003729145887,\n \"acc_norm\": 0.33631284916201115,\n \"acc_norm_stderr\": 0.015801003729145887\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.6666666666666666,\n \"acc_stderr\": 0.02699254433929724,\n \"acc_norm\": 0.6666666666666666,\n \"acc_norm_stderr\": 0.02699254433929724\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.6334405144694534,\n \"acc_stderr\": 0.027368078243971625,\n \"acc_norm\": 0.6334405144694534,\n \"acc_norm_stderr\": 0.027368078243971625\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.6851851851851852,\n \"acc_stderr\": 0.02584224870090217,\n \"acc_norm\": 0.6851851851851852,\n \"acc_norm_stderr\": 0.02584224870090217\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.46099290780141844,\n \"acc_stderr\": 0.02973659252642444,\n \"acc_norm\": 0.46099290780141844,\n \"acc_norm_stderr\": 0.02973659252642444\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.4335071707953064,\n \"acc_stderr\": 0.012656810383983969,\n \"acc_norm\": 0.4335071707953064,\n \"acc_norm_stderr\": 0.012656810383983969\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.5625,\n \"acc_stderr\": 0.030134614954403924,\n \"acc_norm\": 0.5625,\n \"acc_norm_stderr\": 0.030134614954403924\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.6127450980392157,\n \"acc_stderr\": 0.019706875804085627,\n \"acc_norm\": 0.6127450980392157,\n \"acc_norm_stderr\": 0.019706875804085627\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.5909090909090909,\n \"acc_stderr\": 0.04709306978661895,\n \"acc_norm\": 0.5909090909090909,\n \"acc_norm_stderr\": 0.04709306978661895\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.7306122448979592,\n \"acc_stderr\": 0.02840125202902294,\n \"acc_norm\": 0.7306122448979592,\n \"acc_norm_stderr\": 0.02840125202902294\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.8159203980099502,\n \"acc_stderr\": 0.027403859410786838,\n \"acc_norm\": 0.8159203980099502,\n \"acc_norm_stderr\": 0.027403859410786838\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.79,\n \"acc_stderr\": 0.040936018074033256,\n \"acc_norm\": 0.79,\n \"acc_norm_stderr\": 0.040936018074033256\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.463855421686747,\n \"acc_stderr\": 0.03882310850890594,\n \"acc_norm\": 0.463855421686747,\n \"acc_norm_stderr\": 0.03882310850890594\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.783625730994152,\n \"acc_stderr\": 0.031581495393387324,\n \"acc_norm\": 0.783625730994152,\n \"acc_norm_stderr\": 0.031581495393387324\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.34761321909424725,\n \"mc1_stderr\": 0.016670769188897303,\n \"mc2\": 0.49753650846026637,\n \"mc2_stderr\": 0.015257351147769172\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7971586424625099,\n \"acc_stderr\": 0.011301439925936643\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.46019711902956784,\n \"acc_stderr\": 0.013728776714099363\n }\n}\n```", "repo_url": "https://huggingface.co/Deci/DeciLM-7B-instruct", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|arc:challenge|25_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|gsm8k|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hellaswag|10_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-11T16-04-45.894054.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["**/details_harness|winogrande|5_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-11T16-04-45.894054.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_11T16_04_45.894054", "path": ["results_2023-12-11T16-04-45.894054.parquet"]}, {"split": "latest", "path": ["results_2023-12-11T16-04-45.894054.parquet"]}]}]}
2023-12-12T13:57:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of Deci/DeciLM-7B-instruct ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model Deci/DeciLM-7B-instruct on the Open LLM Leaderboard. The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-11T16:04:45.894054(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Evaluation run of Deci/DeciLM-7B-instruct", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model Deci/DeciLM-7B-instruct on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-11T16:04:45.894054(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of Deci/DeciLM-7B-instruct", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model Deci/DeciLM-7B-instruct on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-11T16:04:45.894054(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 19, 31, 168, 67, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of Deci/DeciLM-7B-instruct## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model Deci/DeciLM-7B-instruct on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-11T16:04:45.894054(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
b07750e89b08a9dcebf211eff42d2fe2c846a78f
# Dataset Card for "ML2021_ASR_ST" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ky552/ML2021_ASR_ST
[ "region:us" ]
2023-12-11T16:14:38+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "dev", "path": "data/dev-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "transcription", "dtype": "string"}, {"name": "translation", "dtype": "string"}, {"name": "file", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 6392221146.655, "num_examples": 17779}, {"name": "dev", "num_bytes": 786905707.92, "num_examples": 2997}, {"name": "test", "num_bytes": 4054213966.96, "num_examples": 14916}], "download_size": 8220600841, "dataset_size": 11233340821.535}}
2023-12-11T16:44:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ML2021_ASR_ST" More Information needed
[ "# Dataset Card for \"ML2021_ASR_ST\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ML2021_ASR_ST\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ML2021_ASR_ST\"\n\nMore Information needed" ]
d75aba1981c74510be1f985a49a3be342168b5f6
# Dataset Card for "hugging_face" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
SpongeBash/hugging_face
[ "region:us" ]
2023-12-11T16:55:14+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 75931.0, "num_examples": 12}], "download_size": 77302, "dataset_size": 75931.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-12-11T16:55:16+00:00
[]
[]
TAGS #region-us
# Dataset Card for "hugging_face" More Information needed
[ "# Dataset Card for \"hugging_face\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"hugging_face\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"hugging_face\"\n\nMore Information needed" ]
1a137832c78f2a18978023263dbbbbde60b9802b
Categorization and Segregation, version1: - Bucket 1 Allocation: - If the Petri dish contains P. aeruginosa and/or E. coli colonies. - Bucket 2 Allocation: - If the Petri dish contains S. aureus and B. subtilis colonies. - Bucket 3 Allocation: - For Petri dishes containing colonies other than the specified types or if no colonies are present.
adamrian/petri-dish
[ "region:us" ]
2023-12-11T17:01:20+00:00
{}
2023-12-12T02:05:40+00:00
[]
[]
TAGS #region-us
Categorization and Segregation, version1: - Bucket 1 Allocation: - If the Petri dish contains P. aeruginosa and/or E. coli colonies. - Bucket 2 Allocation: - If the Petri dish contains S. aureus and B. subtilis colonies. - Bucket 3 Allocation: - For Petri dishes containing colonies other than the specified types or if no colonies are present.
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
83b847cefc8014eb2b1136403664ee2e71268d1b
# Dataset Card for "mscoco_simplified_falcon" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
BubbleJoe/mscoco_simplified_falcon
[ "region:us" ]
2023-12-11T17:40:41+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "restval", "path": "data/restval-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "sentids", "dtype": "int64"}, {"name": "sentences", "dtype": "string"}, {"name": "simplified", "dtype": "string"}, {"name": "result", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 54921466, "num_examples": 414113}, {"name": "restval", "num_bytes": 20216246, "num_examples": 152634}, {"name": "validation", "num_bytes": 3314945, "num_examples": 25010}, {"name": "test", "num_bytes": 3312409, "num_examples": 25010}], "download_size": 43521253, "dataset_size": 81765066}}
2023-12-12T21:43:47+00:00
[]
[]
TAGS #region-us
# Dataset Card for "mscoco_simplified_falcon" More Information needed
[ "# Dataset Card for \"mscoco_simplified_falcon\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"mscoco_simplified_falcon\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"mscoco_simplified_falcon\"\n\nMore Information needed" ]
eedaf9060827b052204c0d854d8a39172dc74ba9
# Dataset Card for "hoopers" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ppower1/hoopers
[ "region:us" ]
2023-12-11T17:44:49+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 17775, "num_examples": 100}], "download_size": 3106, "dataset_size": 17775}}
2023-12-11T17:53:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "hoopers" More Information needed
[ "# Dataset Card for \"hoopers\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"hoopers\"\n\nMore Information needed" ]
[ 6, 12 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"hoopers\"\n\nMore Information needed" ]
e8423695c7a4f5dc326d0ae5d92b934747c358f5
This is a WIP dataset used to identify Prompt injections. This dataset contains legitimate prompts and jailbreak prompts. It also contains combinations of phrases that could potentially be used to jailbreak llms inspired from the [rebuff project](https://www.rebuff.ai). Reference: 1. [jailbreakchat](https://www.jailbreakchat.com) 2. Srikanth Srinivas. (2023). Swype.com Dataset. Swype.com. Available at: [Swype.com Dataset](https://swype.com). Email: [email protected] 3. [rebuff project](https://www.rebuff.ai)
predictionguard/promptinjections
[ "license:mit", "region:us" ]
2023-12-11T17:48:07+00:00
{"license": "mit", "dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "INJECTION", "dtype": "bool"}, {"name": "Unnamed: 0", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 5879405, "num_examples": 17678}], "download_size": 3168127, "dataset_size": 5879405}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-12-13T14:16:20+00:00
[]
[]
TAGS #license-mit #region-us
This is a WIP dataset used to identify Prompt injections. This dataset contains legitimate prompts and jailbreak prompts. It also contains combinations of phrases that could potentially be used to jailbreak llms inspired from the rebuff project. Reference: 1. jailbreakchat 2. Srikanth Srinivas. (2023). URL Dataset. URL. Available at: URL Dataset. Email: s@URL 3. rebuff project
[]
[ "TAGS\n#license-mit #region-us \n" ]
[ 11 ]
[ "passage: TAGS\n#license-mit #region-us \n" ]
f91ee0f9df1d1f1d784e5240ea61291c50ececf7
# Dataset Card for "semeval-task-8-a-mono-v2-test-paraphrase-2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kpriyanshu256/semeval-task-8-a-mono-v2-test-paraphrase-2
[ "region:us" ]
2023-12-11T18:06:38+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "model", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "paraphrase", "dtype": "string"}, {"name": "paraphrase2", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 23387836, "num_examples": 5000}], "download_size": 13353734, "dataset_size": 23387836}}
2023-12-11T18:06:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for "semeval-task-8-a-mono-v2-test-paraphrase-2" More Information needed
[ "# Dataset Card for \"semeval-task-8-a-mono-v2-test-paraphrase-2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"semeval-task-8-a-mono-v2-test-paraphrase-2\"\n\nMore Information needed" ]
[ 6, 30 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"semeval-task-8-a-mono-v2-test-paraphrase-2\"\n\nMore Information needed" ]
16af2ad182b956e94b1beefbfef1c3bf6084dce2
# Dataset Card for Evaluation run of mistralai/Mixtral-8x7B-v0.1 <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [mistralai/Mixtral-8x7B-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_mistralai__Mixtral-8x7B-v0.1", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2024-01-04T16:34:48.985318](https://huggingface.co/datasets/open-llm-leaderboard/details_mistralai__Mixtral-8x7B-v0.1/blob/main/results_2024-01-04T16-34-48.985318.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.7159135789734996, "acc_stderr": 0.02999272353761279, "acc_norm": 0.7203233140735184, "acc_norm_stderr": 0.03056866632319033, "mc1": 0.3182374541003672, "mc1_stderr": 0.01630598864892061, "mc2": 0.4680543300316138, "mc2_stderr": 0.014120170542973978 }, "harness|arc:challenge|25": { "acc": 0.6373720136518771, "acc_stderr": 0.014049106564955002, "acc_norm": 0.6638225255972696, "acc_norm_stderr": 0.013804855026205761 }, "harness|hellaswag|10": { "acc": 0.6695877315275841, "acc_stderr": 0.004694002781939571, "acc_norm": 0.8645688109938259, "acc_norm_stderr": 0.003414842236517104 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.34, "acc_stderr": 0.04760952285695236, "acc_norm": 0.34, "acc_norm_stderr": 0.04760952285695236 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.7185185185185186, "acc_stderr": 0.03885004245800254, "acc_norm": 0.7185185185185186, "acc_norm_stderr": 0.03885004245800254 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.8289473684210527, "acc_stderr": 0.030643607071677098, "acc_norm": 0.8289473684210527, "acc_norm_stderr": 0.030643607071677098 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.76, "acc_stderr": 0.04292346959909283, "acc_norm": 0.76, "acc_norm_stderr": 0.04292346959909283 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.7849056603773585, "acc_stderr": 0.02528839450289137, "acc_norm": 0.7849056603773585, "acc_norm_stderr": 0.02528839450289137 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.8680555555555556, "acc_stderr": 0.02830096838204443, "acc_norm": 0.8680555555555556, "acc_norm_stderr": 0.02830096838204443 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.54, "acc_stderr": 0.05009082659620332, "acc_norm": 0.54, "acc_norm_stderr": 0.05009082659620332 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.63, "acc_stderr": 0.04852365870939099, "acc_norm": 0.63, "acc_norm_stderr": 0.04852365870939099 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.46, "acc_stderr": 0.05009082659620332, "acc_norm": 0.46, "acc_norm_stderr": 0.05009082659620332 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.6994219653179191, "acc_stderr": 0.03496101481191179, "acc_norm": 0.6994219653179191, "acc_norm_stderr": 0.03496101481191179 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.46078431372549017, "acc_stderr": 0.04959859966384181, "acc_norm": 0.46078431372549017, "acc_norm_stderr": 0.04959859966384181 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.81, "acc_stderr": 0.039427724440366234, "acc_norm": 0.81, "acc_norm_stderr": 0.039427724440366234 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.6808510638297872, "acc_stderr": 0.030472973363380035, "acc_norm": 0.6808510638297872, "acc_norm_stderr": 0.030472973363380035 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.6491228070175439, "acc_stderr": 0.04489539350270698, "acc_norm": 0.6491228070175439, "acc_norm_stderr": 0.04489539350270698 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.6896551724137931, "acc_stderr": 0.03855289616378948, "acc_norm": 0.6896551724137931, "acc_norm_stderr": 0.03855289616378948 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.48148148148148145, "acc_stderr": 0.025733641991838987, "acc_norm": 0.48148148148148145, "acc_norm_stderr": 0.025733641991838987 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.5634920634920635, "acc_stderr": 0.04435932892851466, "acc_norm": 0.5634920634920635, "acc_norm_stderr": 0.04435932892851466 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.51, "acc_stderr": 0.05024183937956912, "acc_norm": 0.51, "acc_norm_stderr": 0.05024183937956912 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.8419354838709677, "acc_stderr": 0.020752831511875274, "acc_norm": 0.8419354838709677, "acc_norm_stderr": 0.020752831511875274 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.6354679802955665, "acc_stderr": 0.0338640574606209, "acc_norm": 0.6354679802955665, "acc_norm_stderr": 0.0338640574606209 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.72, "acc_stderr": 0.04512608598542127, "acc_norm": 0.72, "acc_norm_stderr": 0.04512608598542127 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.8181818181818182, "acc_stderr": 0.030117688929503585, "acc_norm": 0.8181818181818182, "acc_norm_stderr": 0.030117688929503585 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.8636363636363636, "acc_stderr": 0.024450155973189835, "acc_norm": 0.8636363636363636, "acc_norm_stderr": 0.024450155973189835 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.9378238341968912, "acc_stderr": 0.017426974154240524, "acc_norm": 0.9378238341968912, "acc_norm_stderr": 0.017426974154240524 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.7051282051282052, "acc_stderr": 0.0231193627582323, "acc_norm": 0.7051282051282052, "acc_norm_stderr": 0.0231193627582323 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.3851851851851852, "acc_stderr": 0.029670906124630886, "acc_norm": 0.3851851851851852, "acc_norm_stderr": 0.029670906124630886 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.7857142857142857, "acc_stderr": 0.026653531596715494, "acc_norm": 0.7857142857142857, "acc_norm_stderr": 0.026653531596715494 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.4900662251655629, "acc_stderr": 0.04081677107248436, "acc_norm": 0.4900662251655629, "acc_norm_stderr": 0.04081677107248436 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.8807339449541285, "acc_stderr": 0.013895729292588964, "acc_norm": 0.8807339449541285, "acc_norm_stderr": 0.013895729292588964 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.6481481481481481, "acc_stderr": 0.03256850570293647, "acc_norm": 0.6481481481481481, "acc_norm_stderr": 0.03256850570293647 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.8480392156862745, "acc_stderr": 0.025195658428931792, "acc_norm": 0.8480392156862745, "acc_norm_stderr": 0.025195658428931792 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.890295358649789, "acc_stderr": 0.02034340073486884, "acc_norm": 0.890295358649789, "acc_norm_stderr": 0.02034340073486884 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.7802690582959642, "acc_stderr": 0.027790177064383595, "acc_norm": 0.7802690582959642, "acc_norm_stderr": 0.027790177064383595 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.8091603053435115, "acc_stderr": 0.03446513350752598, "acc_norm": 0.8091603053435115, "acc_norm_stderr": 0.03446513350752598 }, "harness|hendrycksTest-international_law|5": { "acc": 0.8760330578512396, "acc_stderr": 0.03008309871603521, "acc_norm": 0.8760330578512396, "acc_norm_stderr": 0.03008309871603521 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.8333333333333334, "acc_stderr": 0.03602814176392645, "acc_norm": 0.8333333333333334, "acc_norm_stderr": 0.03602814176392645 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.7730061349693251, "acc_stderr": 0.032910995786157686, "acc_norm": 0.7730061349693251, "acc_norm_stderr": 0.032910995786157686 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.5357142857142857, "acc_stderr": 0.04733667890053756, "acc_norm": 0.5357142857142857, "acc_norm_stderr": 0.04733667890053756 }, "harness|hendrycksTest-management|5": { "acc": 0.883495145631068, "acc_stderr": 0.03176683948640407, "acc_norm": 0.883495145631068, "acc_norm_stderr": 0.03176683948640407 }, "harness|hendrycksTest-marketing|5": { "acc": 0.9188034188034188, "acc_stderr": 0.017893784904018533, "acc_norm": 0.9188034188034188, "acc_norm_stderr": 0.017893784904018533 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.78, "acc_stderr": 0.04163331998932263, "acc_norm": 0.78, "acc_norm_stderr": 0.04163331998932263 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.8748403575989783, "acc_stderr": 0.011832954239305723, "acc_norm": 0.8748403575989783, "acc_norm_stderr": 0.011832954239305723 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.7976878612716763, "acc_stderr": 0.021628077380196124, "acc_norm": 0.7976878612716763, "acc_norm_stderr": 0.021628077380196124 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.4011173184357542, "acc_stderr": 0.01639222189940708, "acc_norm": 0.4011173184357542, "acc_norm_stderr": 0.01639222189940708 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.8235294117647058, "acc_stderr": 0.021828596053108402, "acc_norm": 0.8235294117647058, "acc_norm_stderr": 0.021828596053108402 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.7845659163987139, "acc_stderr": 0.023350225475471442, "acc_norm": 0.7845659163987139, "acc_norm_stderr": 0.023350225475471442 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.8395061728395061, "acc_stderr": 0.020423955354778027, "acc_norm": 0.8395061728395061, "acc_norm_stderr": 0.020423955354778027 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.5177304964539007, "acc_stderr": 0.02980873964223777, "acc_norm": 0.5177304964539007, "acc_norm_stderr": 0.02980873964223777 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.5319426336375489, "acc_stderr": 0.012744149704869645, "acc_norm": 0.5319426336375489, "acc_norm_stderr": 0.012744149704869645 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.8125, "acc_stderr": 0.023709788253811766, "acc_norm": 0.8125, "acc_norm_stderr": 0.023709788253811766 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.7843137254901961, "acc_stderr": 0.016639319350313264, "acc_norm": 0.7843137254901961, "acc_norm_stderr": 0.016639319350313264 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.7, "acc_stderr": 0.04389311454644287, "acc_norm": 0.7, "acc_norm_stderr": 0.04389311454644287 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.7877551020408163, "acc_stderr": 0.026176967197866767, "acc_norm": 0.7877551020408163, "acc_norm_stderr": 0.026176967197866767 }, "harness|hendrycksTest-sociology|5": { "acc": 0.8905472636815921, "acc_stderr": 0.022076326101824657, "acc_norm": 0.8905472636815921, "acc_norm_stderr": 0.022076326101824657 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.92, "acc_stderr": 0.0272659924344291, "acc_norm": 0.92, "acc_norm_stderr": 0.0272659924344291 }, "harness|hendrycksTest-virology|5": { "acc": 0.5120481927710844, "acc_stderr": 0.03891364495835817, "acc_norm": 0.5120481927710844, "acc_norm_stderr": 0.03891364495835817 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.8771929824561403, "acc_stderr": 0.02517298435015575, "acc_norm": 0.8771929824561403, "acc_norm_stderr": 0.02517298435015575 }, "harness|truthfulqa:mc|0": { "mc1": 0.3182374541003672, "mc1_stderr": 0.01630598864892061, "mc2": 0.4680543300316138, "mc2_stderr": 0.014120170542973978 }, "harness|winogrande|5": { "acc": 0.8168902920284136, "acc_stderr": 0.01086977863316836 }, "harness|gsm8k|5": { "acc": 0.576194086429113, "acc_stderr": 0.01361163200881036 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
open-llm-leaderboard/details_mistralai__Mixtral-8x7B-v0.1
[ "region:us" ]
2023-12-11T18:07:04+00:00
{"pretty_name": "Evaluation run of mistralai/Mixtral-8x7B-v0.1", "dataset_summary": "Dataset automatically created during the evaluation run of model [mistralai/Mixtral-8x7B-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_mistralai__Mixtral-8x7B-v0.1\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2024-01-04T16:34:48.985318](https://huggingface.co/datasets/open-llm-leaderboard/details_mistralai__Mixtral-8x7B-v0.1/blob/main/results_2024-01-04T16-34-48.985318.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.7159135789734996,\n \"acc_stderr\": 0.02999272353761279,\n \"acc_norm\": 0.7203233140735184,\n \"acc_norm_stderr\": 0.03056866632319033,\n \"mc1\": 0.3182374541003672,\n \"mc1_stderr\": 0.01630598864892061,\n \"mc2\": 0.4680543300316138,\n \"mc2_stderr\": 0.014120170542973978\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.6373720136518771,\n \"acc_stderr\": 0.014049106564955002,\n \"acc_norm\": 0.6638225255972696,\n \"acc_norm_stderr\": 0.013804855026205761\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6695877315275841,\n \"acc_stderr\": 0.004694002781939571,\n \"acc_norm\": 0.8645688109938259,\n \"acc_norm_stderr\": 0.003414842236517104\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.34,\n \"acc_stderr\": 0.04760952285695236,\n \"acc_norm\": 0.34,\n \"acc_norm_stderr\": 0.04760952285695236\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.7185185185185186,\n \"acc_stderr\": 0.03885004245800254,\n \"acc_norm\": 0.7185185185185186,\n \"acc_norm_stderr\": 0.03885004245800254\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.8289473684210527,\n \"acc_stderr\": 0.030643607071677098,\n \"acc_norm\": 0.8289473684210527,\n \"acc_norm_stderr\": 0.030643607071677098\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.76,\n \"acc_stderr\": 0.04292346959909283,\n \"acc_norm\": 0.76,\n \"acc_norm_stderr\": 0.04292346959909283\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.7849056603773585,\n \"acc_stderr\": 0.02528839450289137,\n \"acc_norm\": 0.7849056603773585,\n \"acc_norm_stderr\": 0.02528839450289137\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.8680555555555556,\n \"acc_stderr\": 0.02830096838204443,\n \"acc_norm\": 0.8680555555555556,\n \"acc_norm_stderr\": 0.02830096838204443\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.54,\n \"acc_stderr\": 0.05009082659620332,\n \"acc_norm\": 0.54,\n \"acc_norm_stderr\": 0.05009082659620332\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.63,\n \"acc_stderr\": 0.04852365870939099,\n \"acc_norm\": 0.63,\n \"acc_norm_stderr\": 0.04852365870939099\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.46,\n \"acc_stderr\": 0.05009082659620332,\n \"acc_norm\": 0.46,\n \"acc_norm_stderr\": 0.05009082659620332\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.6994219653179191,\n \"acc_stderr\": 0.03496101481191179,\n \"acc_norm\": 0.6994219653179191,\n \"acc_norm_stderr\": 0.03496101481191179\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.46078431372549017,\n \"acc_stderr\": 0.04959859966384181,\n \"acc_norm\": 0.46078431372549017,\n \"acc_norm_stderr\": 0.04959859966384181\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.81,\n \"acc_stderr\": 0.039427724440366234,\n \"acc_norm\": 0.81,\n \"acc_norm_stderr\": 0.039427724440366234\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.6808510638297872,\n \"acc_stderr\": 0.030472973363380035,\n \"acc_norm\": 0.6808510638297872,\n \"acc_norm_stderr\": 0.030472973363380035\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.6491228070175439,\n \"acc_stderr\": 0.04489539350270698,\n \"acc_norm\": 0.6491228070175439,\n \"acc_norm_stderr\": 0.04489539350270698\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.6896551724137931,\n \"acc_stderr\": 0.03855289616378948,\n \"acc_norm\": 0.6896551724137931,\n \"acc_norm_stderr\": 0.03855289616378948\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.48148148148148145,\n \"acc_stderr\": 0.025733641991838987,\n \"acc_norm\": 0.48148148148148145,\n \"acc_norm_stderr\": 0.025733641991838987\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.5634920634920635,\n \"acc_stderr\": 0.04435932892851466,\n \"acc_norm\": 0.5634920634920635,\n \"acc_norm_stderr\": 0.04435932892851466\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.51,\n \"acc_stderr\": 0.05024183937956912,\n \"acc_norm\": 0.51,\n \"acc_norm_stderr\": 0.05024183937956912\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.8419354838709677,\n \"acc_stderr\": 0.020752831511875274,\n \"acc_norm\": 0.8419354838709677,\n \"acc_norm_stderr\": 0.020752831511875274\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.6354679802955665,\n \"acc_stderr\": 0.0338640574606209,\n \"acc_norm\": 0.6354679802955665,\n \"acc_norm_stderr\": 0.0338640574606209\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.72,\n \"acc_stderr\": 0.04512608598542127,\n \"acc_norm\": 0.72,\n \"acc_norm_stderr\": 0.04512608598542127\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.8181818181818182,\n \"acc_stderr\": 0.030117688929503585,\n \"acc_norm\": 0.8181818181818182,\n \"acc_norm_stderr\": 0.030117688929503585\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.8636363636363636,\n \"acc_stderr\": 0.024450155973189835,\n \"acc_norm\": 0.8636363636363636,\n \"acc_norm_stderr\": 0.024450155973189835\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.9378238341968912,\n \"acc_stderr\": 0.017426974154240524,\n \"acc_norm\": 0.9378238341968912,\n \"acc_norm_stderr\": 0.017426974154240524\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.7051282051282052,\n \"acc_stderr\": 0.0231193627582323,\n \"acc_norm\": 0.7051282051282052,\n \"acc_norm_stderr\": 0.0231193627582323\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.3851851851851852,\n \"acc_stderr\": 0.029670906124630886,\n \"acc_norm\": 0.3851851851851852,\n \"acc_norm_stderr\": 0.029670906124630886\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.7857142857142857,\n \"acc_stderr\": 0.026653531596715494,\n \"acc_norm\": 0.7857142857142857,\n \"acc_norm_stderr\": 0.026653531596715494\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.4900662251655629,\n \"acc_stderr\": 0.04081677107248436,\n \"acc_norm\": 0.4900662251655629,\n \"acc_norm_stderr\": 0.04081677107248436\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.8807339449541285,\n \"acc_stderr\": 0.013895729292588964,\n \"acc_norm\": 0.8807339449541285,\n \"acc_norm_stderr\": 0.013895729292588964\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.6481481481481481,\n \"acc_stderr\": 0.03256850570293647,\n \"acc_norm\": 0.6481481481481481,\n \"acc_norm_stderr\": 0.03256850570293647\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.8480392156862745,\n \"acc_stderr\": 0.025195658428931792,\n \"acc_norm\": 0.8480392156862745,\n \"acc_norm_stderr\": 0.025195658428931792\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.890295358649789,\n \"acc_stderr\": 0.02034340073486884,\n \"acc_norm\": 0.890295358649789,\n \"acc_norm_stderr\": 0.02034340073486884\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.7802690582959642,\n \"acc_stderr\": 0.027790177064383595,\n \"acc_norm\": 0.7802690582959642,\n \"acc_norm_stderr\": 0.027790177064383595\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.8091603053435115,\n \"acc_stderr\": 0.03446513350752598,\n \"acc_norm\": 0.8091603053435115,\n \"acc_norm_stderr\": 0.03446513350752598\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.8760330578512396,\n \"acc_stderr\": 0.03008309871603521,\n \"acc_norm\": 0.8760330578512396,\n \"acc_norm_stderr\": 0.03008309871603521\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.8333333333333334,\n \"acc_stderr\": 0.03602814176392645,\n \"acc_norm\": 0.8333333333333334,\n \"acc_norm_stderr\": 0.03602814176392645\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.7730061349693251,\n \"acc_stderr\": 0.032910995786157686,\n \"acc_norm\": 0.7730061349693251,\n \"acc_norm_stderr\": 0.032910995786157686\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.5357142857142857,\n \"acc_stderr\": 0.04733667890053756,\n \"acc_norm\": 0.5357142857142857,\n \"acc_norm_stderr\": 0.04733667890053756\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.883495145631068,\n \"acc_stderr\": 0.03176683948640407,\n \"acc_norm\": 0.883495145631068,\n \"acc_norm_stderr\": 0.03176683948640407\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.9188034188034188,\n \"acc_stderr\": 0.017893784904018533,\n \"acc_norm\": 0.9188034188034188,\n \"acc_norm_stderr\": 0.017893784904018533\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.78,\n \"acc_stderr\": 0.04163331998932263,\n \"acc_norm\": 0.78,\n \"acc_norm_stderr\": 0.04163331998932263\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.8748403575989783,\n \"acc_stderr\": 0.011832954239305723,\n \"acc_norm\": 0.8748403575989783,\n \"acc_norm_stderr\": 0.011832954239305723\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.7976878612716763,\n \"acc_stderr\": 0.021628077380196124,\n \"acc_norm\": 0.7976878612716763,\n \"acc_norm_stderr\": 0.021628077380196124\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.4011173184357542,\n \"acc_stderr\": 0.01639222189940708,\n \"acc_norm\": 0.4011173184357542,\n \"acc_norm_stderr\": 0.01639222189940708\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.8235294117647058,\n \"acc_stderr\": 0.021828596053108402,\n \"acc_norm\": 0.8235294117647058,\n \"acc_norm_stderr\": 0.021828596053108402\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.7845659163987139,\n \"acc_stderr\": 0.023350225475471442,\n \"acc_norm\": 0.7845659163987139,\n \"acc_norm_stderr\": 0.023350225475471442\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.8395061728395061,\n \"acc_stderr\": 0.020423955354778027,\n \"acc_norm\": 0.8395061728395061,\n \"acc_norm_stderr\": 0.020423955354778027\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.5177304964539007,\n \"acc_stderr\": 0.02980873964223777,\n \"acc_norm\": 0.5177304964539007,\n \"acc_norm_stderr\": 0.02980873964223777\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.5319426336375489,\n \"acc_stderr\": 0.012744149704869645,\n \"acc_norm\": 0.5319426336375489,\n \"acc_norm_stderr\": 0.012744149704869645\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.8125,\n \"acc_stderr\": 0.023709788253811766,\n \"acc_norm\": 0.8125,\n \"acc_norm_stderr\": 0.023709788253811766\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.7843137254901961,\n \"acc_stderr\": 0.016639319350313264,\n \"acc_norm\": 0.7843137254901961,\n \"acc_norm_stderr\": 0.016639319350313264\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.7,\n \"acc_stderr\": 0.04389311454644287,\n \"acc_norm\": 0.7,\n \"acc_norm_stderr\": 0.04389311454644287\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.7877551020408163,\n \"acc_stderr\": 0.026176967197866767,\n \"acc_norm\": 0.7877551020408163,\n \"acc_norm_stderr\": 0.026176967197866767\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.8905472636815921,\n \"acc_stderr\": 0.022076326101824657,\n \"acc_norm\": 0.8905472636815921,\n \"acc_norm_stderr\": 0.022076326101824657\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.92,\n \"acc_stderr\": 0.0272659924344291,\n \"acc_norm\": 0.92,\n \"acc_norm_stderr\": 0.0272659924344291\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.5120481927710844,\n \"acc_stderr\": 0.03891364495835817,\n \"acc_norm\": 0.5120481927710844,\n \"acc_norm_stderr\": 0.03891364495835817\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.8771929824561403,\n \"acc_stderr\": 0.02517298435015575,\n \"acc_norm\": 0.8771929824561403,\n \"acc_norm_stderr\": 0.02517298435015575\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.3182374541003672,\n \"mc1_stderr\": 0.01630598864892061,\n \"mc2\": 0.4680543300316138,\n \"mc2_stderr\": 0.014120170542973978\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.8168902920284136,\n \"acc_stderr\": 0.01086977863316836\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.576194086429113,\n \"acc_stderr\": 0.01361163200881036\n }\n}\n```", "repo_url": "https://huggingface.co/mistralai/Mixtral-8x7B-v0.1", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|arc:challenge|25_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|arc:challenge|25_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|arc:challenge|25_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|gsm8k|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|gsm8k|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|gsm8k|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hellaswag|10_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hellaswag|10_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hellaswag|10_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-11T18-04-02.035270.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-15T14-35-04.630519.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-anatomy|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-astronomy|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-college_biology|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-college_physics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-computer_security|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-econometrics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-global_facts|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-human_aging|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-international_law|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-management|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-marketing|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-nutrition|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-philosophy|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-prehistory|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-professional_law|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-public_relations|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-security_studies|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-sociology|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-virology|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-world_religions|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-anatomy|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-astronomy|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-college_biology|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-college_physics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-computer_security|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-econometrics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-global_facts|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-human_aging|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-international_law|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-management|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-marketing|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-nutrition|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-philosophy|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-prehistory|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-professional_law|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-public_relations|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-security_studies|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-sociology|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-virology|5_2024-01-04T16-34-48.985318.parquet", "**/details_harness|hendrycksTest-world_religions|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-anatomy|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-astronomy|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-college_biology|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-college_physics|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-computer_security|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-econometrics|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-global_facts|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-human_aging|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-international_law|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-management|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-marketing|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-nutrition|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-philosophy|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-prehistory|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-professional_law|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-public_relations|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-security_studies|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-sociology|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-virology|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|hendrycksTest-world_religions|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|truthfulqa:mc|0_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["**/details_harness|winogrande|5_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["**/details_harness|winogrande|5_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["**/details_harness|winogrande|5_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2024-01-04T16-34-48.985318.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_11T18_04_02.035270", "path": ["results_2023-12-11T18-04-02.035270.parquet"]}, {"split": "2023_12_15T14_35_04.630519", "path": ["results_2023-12-15T14-35-04.630519.parquet"]}, {"split": "2024_01_04T16_34_48.985318", "path": ["results_2024-01-04T16-34-48.985318.parquet"]}, {"split": "latest", "path": ["results_2024-01-04T16-34-48.985318.parquet"]}]}]}
2024-01-04T16:38:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of mistralai/Mixtral-8x7B-v0.1 Dataset automatically created during the evaluation run of model mistralai/Mixtral-8x7B-v0.1 on the Open LLM Leaderboard. The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2024-01-04T16:34:48.985318(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Evaluation run of mistralai/Mixtral-8x7B-v0.1\n\n\n\nDataset automatically created during the evaluation run of model mistralai/Mixtral-8x7B-v0.1 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2024-01-04T16:34:48.985318(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of mistralai/Mixtral-8x7B-v0.1\n\n\n\nDataset automatically created during the evaluation run of model mistralai/Mixtral-8x7B-v0.1 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2024-01-04T16:34:48.985318(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 189, 68, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of mistralai/Mixtral-8x7B-v0.1\n\n\n\nDataset automatically created during the evaluation run of model mistralai/Mixtral-8x7B-v0.1 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2024-01-04T16:34:48.985318(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]" ]
782ceaaeb7f01c47e80cfdcf61a577ce01942fa2
# Dataset Card for "semeval-task-8-a-mono-v2-test-paraphrase-2-mistral-7b" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kpriyanshu256/semeval-task-8-a-mono-v2-test-paraphrase-2-mistral-7b
[ "region:us" ]
2023-12-11T18:38:24+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "model", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "paraphrase", "dtype": "string"}, {"name": "paraphrase2", "dtype": "string"}, {"name": "mistral-7b_estimated_loss", "dtype": "float64"}, {"name": "mistral-7b_mean_lowest25", "dtype": "float64"}, {"name": "mistral-7b_mean_highest25", "dtype": "float64"}, {"name": "mistral-7b_max", "dtype": "float64"}, {"name": "mistral-7b_min", "dtype": "float64"}, {"name": "mistral-7b_range", "dtype": "float64"}, {"name": "mistral-7b_mean", "dtype": "float64"}, {"name": "mistral-7b_std", "dtype": "float64"}, {"name": "mistral-7b_entropy", "dtype": "float64"}, {"name": "mistral-7b_kurtosis", "dtype": "float64"}, {"name": "mistral-7b_skewness", "dtype": "float64"}, {"name": "mistral-7b_perplexity", "dtype": "float64"}], "splits": [{"name": "test", "num_bytes": 23867836, "num_examples": 5000}], "download_size": 13908370, "dataset_size": 23867836}}
2023-12-11T18:38:26+00:00
[]
[]
TAGS #region-us
# Dataset Card for "semeval-task-8-a-mono-v2-test-paraphrase-2-mistral-7b" More Information needed
[ "# Dataset Card for \"semeval-task-8-a-mono-v2-test-paraphrase-2-mistral-7b\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"semeval-task-8-a-mono-v2-test-paraphrase-2-mistral-7b\"\n\nMore Information needed" ]
[ 6, 35 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"semeval-task-8-a-mono-v2-test-paraphrase-2-mistral-7b\"\n\nMore Information needed" ]
56763b13c50307c9ef2f286264178a53525b72b3
Dataset from: https://huggingface.co/datasets/glue Every split besides the ax split is in this dataset. Lines above 512 characters from the BERT-cased (bert-base-cased) tokenizer are removed
gmongaras/BERT_Base_Cased_512_GLUE
[ "region:us" ]
2023-12-11T18:54:49+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "sentence", "dtype": "string"}, {"name": "label", "dtype": "float64"}, {"name": "dataset_name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 163269248, "num_examples": 949728}, {"name": "validation", "num_bytes": 12111201, "num_examples": 69711}, {"name": "test", "num_bytes": 64264632, "num_examples": 425205}], "download_size": 135600002, "dataset_size": 239645081}}
2023-12-11T19:27:11+00:00
[]
[]
TAGS #region-us
Dataset from: URL Every split besides the ax split is in this dataset. Lines above 512 characters from the BERT-cased (bert-base-cased) tokenizer are removed
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
40c8a062005684f623bd70f6be511e6ba11da952
### Dataset Summary This dataset consists of 3570 tweets, which were manually labeled as cyberbullying or not cyberbullying. A distinguishing feature of this dataset is that for a given word, there is an annotated tweet labeled as cyberbullying that contains that word, and another tweet labeled as not cyberbullying with the same word. This is made possible because the context in which the same word is used can vary, leading to tweets being classified differently. For instance, tweets in the not cyberbullying category predominantly contain obscene words that, in their particular context, do not correspond with cyberbullying. An example is “Marica, se me olvidó ver el partido”. Additionally, the not cyberbullying category, to a lesser extent, includes tweets sourced from trends in the Colombian region. Twitter trends reflect the most popular topics and conversations in a given area at a specific time, essentially capturing what people are discussing and sharing online in that geographical locale. Trend-based tweets were utilized for those instances where it was not feasible to obtain not cyberbullying tweets containing a specific offensive word or phrase, such as “ojala te violen”. Conversely, tweets labeled as cyberbullying might not always contain words or phrases that are deemed strong or obscene, like in the example “te voy a buscar”. The distribution of cyberbullying tweets and non-cyberbullying tweets was the same. The keywords and phrases used in the creation of the dataset were selected based on the categories provided in the article Guidelines for the Fine-Grained Analysis of Cyberbullying authored by Cynthia Van Hee, Ben Verhoeven, Els Lefever, Guy De Pauw, Walter Daelemans, and Véronique Hoste. Four categories were included: insult, threat, curse, and defamation. The insult category involves the use of offensive words intended to verbally hurt another person, while threat aims to harm the victim's integrity. Curse includes words that wish harm or misfortune upon a person, and defamation seeks to damage the victim’s reputation. These categories were chosen to capture a broad representation of the forms in which cyberbullying can manifest. The tweets were labeled by an occupational therapist associated with the project.
FelipeGuerra/Colombian_Spanish_Cyberbullying_Dataset_1
[ "license:mit", "region:us" ]
2023-12-11T18:56:21+00:00
{"license": "mit"}
2023-12-11T19:30:25+00:00
[]
[]
TAGS #license-mit #region-us
### Dataset Summary This dataset consists of 3570 tweets, which were manually labeled as cyberbullying or not cyberbullying. A distinguishing feature of this dataset is that for a given word, there is an annotated tweet labeled as cyberbullying that contains that word, and another tweet labeled as not cyberbullying with the same word. This is made possible because the context in which the same word is used can vary, leading to tweets being classified differently. For instance, tweets in the not cyberbullying category predominantly contain obscene words that, in their particular context, do not correspond with cyberbullying. An example is “Marica, se me olvidó ver el partido”. Additionally, the not cyberbullying category, to a lesser extent, includes tweets sourced from trends in the Colombian region. Twitter trends reflect the most popular topics and conversations in a given area at a specific time, essentially capturing what people are discussing and sharing online in that geographical locale. Trend-based tweets were utilized for those instances where it was not feasible to obtain not cyberbullying tweets containing a specific offensive word or phrase, such as “ojala te violen”. Conversely, tweets labeled as cyberbullying might not always contain words or phrases that are deemed strong or obscene, like in the example “te voy a buscar”. The distribution of cyberbullying tweets and non-cyberbullying tweets was the same. The keywords and phrases used in the creation of the dataset were selected based on the categories provided in the article Guidelines for the Fine-Grained Analysis of Cyberbullying authored by Cynthia Van Hee, Ben Verhoeven, Els Lefever, Guy De Pauw, Walter Daelemans, and Véronique Hoste. Four categories were included: insult, threat, curse, and defamation. The insult category involves the use of offensive words intended to verbally hurt another person, while threat aims to harm the victim's integrity. Curse includes words that wish harm or misfortune upon a person, and defamation seeks to damage the victim’s reputation. These categories were chosen to capture a broad representation of the forms in which cyberbullying can manifest. The tweets were labeled by an occupational therapist associated with the project.
[ "### Dataset Summary\n\nThis dataset consists of 3570 tweets, which were manually labeled as cyberbullying or not cyberbullying. A distinguishing feature of this dataset is that for a given word, there is an annotated tweet labeled as cyberbullying that contains that word, and another tweet labeled as not cyberbullying with the same word. This is made possible because the context in which the same word is used can vary, leading to tweets being classified differently.\n\nFor instance, tweets in the not cyberbullying category predominantly contain obscene words that, in their particular context, do not correspond with cyberbullying. An example is “Marica, se me olvidó ver el partido”. Additionally, the not cyberbullying category, to a lesser extent, includes tweets sourced from trends in the Colombian region. Twitter trends reflect the most popular topics and conversations in a given area at a specific time, essentially capturing what people are discussing and sharing online in that geographical locale.\n\nTrend-based tweets were utilized for those instances where it was not feasible to obtain not cyberbullying tweets containing a specific offensive word or phrase, such as “ojala te violen”. Conversely, tweets labeled as cyberbullying might not always contain words or phrases that are deemed strong or obscene, like in the example “te voy a buscar”.\n\nThe distribution of cyberbullying tweets and non-cyberbullying tweets was the same. The keywords and phrases used in the creation of the dataset were selected based on the categories provided in the article Guidelines for the Fine-Grained Analysis of Cyberbullying authored by Cynthia Van Hee, Ben Verhoeven, Els Lefever, Guy De Pauw, Walter Daelemans, and Véronique Hoste. Four categories were included: insult, threat, curse, and defamation. The insult category involves the use of offensive words intended to verbally hurt another person, while threat aims to harm the victim's integrity. Curse includes words that wish harm or misfortune upon a person, and defamation seeks to damage the victim’s reputation. These categories were chosen to capture a broad representation of the forms in which cyberbullying can manifest. The tweets were labeled by an occupational therapist associated with the project." ]
[ "TAGS\n#license-mit #region-us \n", "### Dataset Summary\n\nThis dataset consists of 3570 tweets, which were manually labeled as cyberbullying or not cyberbullying. A distinguishing feature of this dataset is that for a given word, there is an annotated tweet labeled as cyberbullying that contains that word, and another tweet labeled as not cyberbullying with the same word. This is made possible because the context in which the same word is used can vary, leading to tweets being classified differently.\n\nFor instance, tweets in the not cyberbullying category predominantly contain obscene words that, in their particular context, do not correspond with cyberbullying. An example is “Marica, se me olvidó ver el partido”. Additionally, the not cyberbullying category, to a lesser extent, includes tweets sourced from trends in the Colombian region. Twitter trends reflect the most popular topics and conversations in a given area at a specific time, essentially capturing what people are discussing and sharing online in that geographical locale.\n\nTrend-based tweets were utilized for those instances where it was not feasible to obtain not cyberbullying tweets containing a specific offensive word or phrase, such as “ojala te violen”. Conversely, tweets labeled as cyberbullying might not always contain words or phrases that are deemed strong or obscene, like in the example “te voy a buscar”.\n\nThe distribution of cyberbullying tweets and non-cyberbullying tweets was the same. The keywords and phrases used in the creation of the dataset were selected based on the categories provided in the article Guidelines for the Fine-Grained Analysis of Cyberbullying authored by Cynthia Van Hee, Ben Verhoeven, Els Lefever, Guy De Pauw, Walter Daelemans, and Véronique Hoste. Four categories were included: insult, threat, curse, and defamation. The insult category involves the use of offensive words intended to verbally hurt another person, while threat aims to harm the victim's integrity. Curse includes words that wish harm or misfortune upon a person, and defamation seeks to damage the victim’s reputation. These categories were chosen to capture a broad representation of the forms in which cyberbullying can manifest. The tweets were labeled by an occupational therapist associated with the project." ]
[ 11, 540 ]
[ "passage: TAGS\n#license-mit #region-us \n" ]
8bae46bba9bd71b7bed965988814e7334a07fec2
# Code des douanes, non-instruct (11-12-2023) This project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. Fine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach. Instruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks. Instruction-based fine-tuning significantly enhances the performance of LLMs in the following ways: - Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions. - Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs. - Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more. - Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs. - Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text. ## Dataset generation This JSON file is a list of dictionaries, each dictionary contains the following fields: - `instruction`: `string`, presenting the instruction linked to the element. - `input`: `string`, signifying the input details for the element. - `output`: `string`, indicating the output information for the element. We used the following list of instructions for generating the dataset: ```python instructions = [ "Compose l'intégralité de l'article sous forme écrite.", "Écris la totalité du contenu de l'article.", "Formule la totalité du texte présent dans l'article.", "Produis l'intégralité de l'article en écriture.", "Développe l'article dans son ensemble par écrit.", "Génère l'ensemble du texte contenu dans l'article.", "Formule le contenu intégral de l'article en entier.", "Rédige la totalité du texte de l'article en entier.", "Compose l'intégralité du contenu textuel de l'article.", "Rédige l'ensemble du texte qui constitue l'article.", "Formule l'article entier dans son contenu écrit.", "Composez l'intégralité de l'article sous forme écrite.", "Écrivez la totalité du contenu de l'article.", "Formulez la totalité du texte présent dans l'article.", "Développez l'article dans son ensemble par écrit.", "Générez l'ensemble du texte contenu dans l'article.", "Formulez le contenu intégral de l'article en entier.", "Rédigez la totalité du texte de l'article en entier.", "Composez l'intégralité du contenu textuel de l'article.", "Écrivez l'article dans son intégralité en termes de texte.", "Rédigez l'ensemble du texte qui constitue l'article.", "Formulez l'article entier dans son contenu écrit.", "Composer l'intégralité de l'article sous forme écrite.", "Écrire la totalité du contenu de l'article.", "Formuler la totalité du texte présent dans l'article.", "Produire l'intégralité de l'article en écriture.", "Développer l'article dans son ensemble par écrit.", "Générer l'ensemble du texte contenu dans l'article.", "Formuler le contenu intégral de l'article en entier.", "Rédiger la totalité du texte de l'article en entier.", "Composer l'intégralité du contenu textuel de l'article.", "Rédiger l'ensemble du texte qui constitue l'article.", "Formuler l'article entier dans son contenu écrit.", "Quelles sont les dispositions de l'article ?", "Quelles dispositions sont incluses dans l'article ?", "Quelles sont les dispositions énoncées dans l'article ?", "Quel est le texte intégral de l'article ?", "Quelle est la lettre de l'article ?" ] ``` ## Citing this project If you use this code in your research, please use the following BibTeX entry. ```BibTeX @misc{louisbrulenaudet2023, author = {Louis Brulé Naudet}, title = {Code des douanes, non-instruct (11-12-2023)}, howpublished = {\url{https://huggingface.co/datasets/louisbrulenaudet/code-douanes}}, year = {2023} } ``` ## Feedback If you have any feedback, please reach out at [[email protected]](mailto:[email protected]).
louisbrulenaudet/code-douanes
[ "task_categories:text-generation", "task_categories:table-question-answering", "task_categories:summarization", "task_categories:conversational", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:fr", "license:apache-2.0", "finetuning", "legal", "french law", "droit français", "Code des douanes", "doi:10.57967/hf/1447", "region:us" ]
2023-12-11T19:08:27+00:00
{"language": ["fr"], "license": "apache-2.0", "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": ["original"], "task_categories": ["text-generation", "table-question-answering", "summarization", "conversational"], "pretty_name": "Code des douanes", "tags": ["finetuning", "legal", "french law", "droit fran\u00e7ais", "Code des douanes"]}
2023-12-12T10:43:48+00:00
[]
[ "fr" ]
TAGS #task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #french law #droit français #Code des douanes #doi-10.57967/hf/1447 #region-us
# Code des douanes, non-instruct (11-12-2023) This project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. Fine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach. Instruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks. Instruction-based fine-tuning significantly enhances the performance of LLMs in the following ways: - Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions. - Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs. - Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more. - Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs. - Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text. ## Dataset generation This JSON file is a list of dictionaries, each dictionary contains the following fields: - 'instruction': 'string', presenting the instruction linked to the element. - 'input': 'string', signifying the input details for the element. - 'output': 'string', indicating the output information for the element. We used the following list of instructions for generating the dataset: ## Citing this project If you use this code in your research, please use the following BibTeX entry. ## Feedback If you have any feedback, please reach out at louisbrulenaudet@URL.
[ "# Code des douanes, non-instruct (11-12-2023)\n\nThis project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. \n\nFine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach.\n\nInstruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks.\n\nInstruction-based fine-tuning significantly enhances the performance of LLMs in the following ways:\n\n- Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions.\n- Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs.\n- Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more.\n- Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs.\n- Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text.", "## Dataset generation\n\nThis JSON file is a list of dictionaries, each dictionary contains the following fields:\n\n- 'instruction': 'string', presenting the instruction linked to the element.\n- 'input': 'string', signifying the input details for the element.\n- 'output': 'string', indicating the output information for the element.\n\nWe used the following list of instructions for generating the dataset:", "## Citing this project\n\nIf you use this code in your research, please use the following BibTeX entry.", "## Feedback\n\nIf you have any feedback, please reach out at louisbrulenaudet@URL." ]
[ "TAGS\n#task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #french law #droit français #Code des douanes #doi-10.57967/hf/1447 #region-us \n", "# Code des douanes, non-instruct (11-12-2023)\n\nThis project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. \n\nFine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach.\n\nInstruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks.\n\nInstruction-based fine-tuning significantly enhances the performance of LLMs in the following ways:\n\n- Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions.\n- Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs.\n- Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more.\n- Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs.\n- Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text.", "## Dataset generation\n\nThis JSON file is a list of dictionaries, each dictionary contains the following fields:\n\n- 'instruction': 'string', presenting the instruction linked to the element.\n- 'input': 'string', signifying the input details for the element.\n- 'output': 'string', indicating the output information for the element.\n\nWe used the following list of instructions for generating the dataset:", "## Citing this project\n\nIf you use this code in your research, please use the following BibTeX entry.", "## Feedback\n\nIf you have any feedback, please reach out at louisbrulenaudet@URL." ]
[ 125, 502, 100, 24, 21 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #french law #droit français #Code des douanes #doi-10.57967/hf/1447 #region-us \n" ]
bd37158db078224c2cf47488d81624ccabf3dc02
# Code de la consommation, non-instruct (11-12-2023) This project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. Fine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach. Instruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks. Instruction-based fine-tuning significantly enhances the performance of LLMs in the following ways: - Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions. - Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs. - Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more. - Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs. - Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text. ## Dataset generation This JSON file is a list of dictionaries, each dictionary contains the following fields: - `instruction`: `string`, presenting the instruction linked to the element. - `input`: `string`, signifying the input details for the element. - `output`: `string`, indicating the output information for the element. We used the following list of instructions for generating the dataset: ```python instructions = [ "Compose l'intégralité de l'article sous forme écrite.", "Écris la totalité du contenu de l'article.", "Formule la totalité du texte présent dans l'article.", "Produis l'intégralité de l'article en écriture.", "Développe l'article dans son ensemble par écrit.", "Génère l'ensemble du texte contenu dans l'article.", "Formule le contenu intégral de l'article en entier.", "Rédige la totalité du texte de l'article en entier.", "Compose l'intégralité du contenu textuel de l'article.", "Rédige l'ensemble du texte qui constitue l'article.", "Formule l'article entier dans son contenu écrit.", "Composez l'intégralité de l'article sous forme écrite.", "Écrivez la totalité du contenu de l'article.", "Formulez la totalité du texte présent dans l'article.", "Développez l'article dans son ensemble par écrit.", "Générez l'ensemble du texte contenu dans l'article.", "Formulez le contenu intégral de l'article en entier.", "Rédigez la totalité du texte de l'article en entier.", "Composez l'intégralité du contenu textuel de l'article.", "Écrivez l'article dans son intégralité en termes de texte.", "Rédigez l'ensemble du texte qui constitue l'article.", "Formulez l'article entier dans son contenu écrit.", "Composer l'intégralité de l'article sous forme écrite.", "Écrire la totalité du contenu de l'article.", "Formuler la totalité du texte présent dans l'article.", "Produire l'intégralité de l'article en écriture.", "Développer l'article dans son ensemble par écrit.", "Générer l'ensemble du texte contenu dans l'article.", "Formuler le contenu intégral de l'article en entier.", "Rédiger la totalité du texte de l'article en entier.", "Composer l'intégralité du contenu textuel de l'article.", "Rédiger l'ensemble du texte qui constitue l'article.", "Formuler l'article entier dans son contenu écrit.", "Quelles sont les dispositions de l'article ?", "Quelles dispositions sont incluses dans l'article ?", "Quelles sont les dispositions énoncées dans l'article ?", "Quel est le texte intégral de l'article ?", "Quelle est la lettre de l'article ?" ] ``` ## Citing this project If you use this code in your research, please use the following BibTeX entry. ```BibTeX @misc{louisbrulenaudet2023, author = {Louis Brulé Naudet}, title = {Code de la consommation, non-instruct (11-12-2023)}, howpublished = {\url{https://huggingface.co/datasets/louisbrulenaudet/code-consommation}}, year = {2023} } ``` ## Feedback If you have any feedback, please reach out at [[email protected]](mailto:[email protected]).
louisbrulenaudet/code-consommation
[ "task_categories:text-generation", "task_categories:table-question-answering", "task_categories:summarization", "task_categories:conversational", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:fr", "license:apache-2.0", "finetuning", "legal", "french law", "droit français", "Code de la consommation", "doi:10.57967/hf/1446", "region:us" ]
2023-12-11T19:38:36+00:00
{"language": ["fr"], "license": "apache-2.0", "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": ["original"], "task_categories": ["text-generation", "table-question-answering", "summarization", "conversational"], "pretty_name": "Code de la consommation", "tags": ["finetuning", "legal", "french law", "droit fran\u00e7ais", "Code de la consommation"]}
2023-12-12T10:43:21+00:00
[]
[ "fr" ]
TAGS #task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #french law #droit français #Code de la consommation #doi-10.57967/hf/1446 #region-us
# Code de la consommation, non-instruct (11-12-2023) This project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. Fine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach. Instruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks. Instruction-based fine-tuning significantly enhances the performance of LLMs in the following ways: - Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions. - Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs. - Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more. - Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs. - Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text. ## Dataset generation This JSON file is a list of dictionaries, each dictionary contains the following fields: - 'instruction': 'string', presenting the instruction linked to the element. - 'input': 'string', signifying the input details for the element. - 'output': 'string', indicating the output information for the element. We used the following list of instructions for generating the dataset: ## Citing this project If you use this code in your research, please use the following BibTeX entry. ## Feedback If you have any feedback, please reach out at louisbrulenaudet@URL.
[ "# Code de la consommation, non-instruct (11-12-2023)\n\nThis project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. \n\nFine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach.\n\nInstruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks.\n\nInstruction-based fine-tuning significantly enhances the performance of LLMs in the following ways:\n\n- Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions.\n- Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs.\n- Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more.\n- Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs.\n- Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text.", "## Dataset generation\n\nThis JSON file is a list of dictionaries, each dictionary contains the following fields:\n\n- 'instruction': 'string', presenting the instruction linked to the element.\n- 'input': 'string', signifying the input details for the element.\n- 'output': 'string', indicating the output information for the element.\n\nWe used the following list of instructions for generating the dataset:", "## Citing this project\n\nIf you use this code in your research, please use the following BibTeX entry.", "## Feedback\n\nIf you have any feedback, please reach out at louisbrulenaudet@URL." ]
[ "TAGS\n#task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #french law #droit français #Code de la consommation #doi-10.57967/hf/1446 #region-us \n", "# Code de la consommation, non-instruct (11-12-2023)\n\nThis project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. \n\nFine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach.\n\nInstruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks.\n\nInstruction-based fine-tuning significantly enhances the performance of LLMs in the following ways:\n\n- Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions.\n- Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs.\n- Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more.\n- Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs.\n- Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text.", "## Dataset generation\n\nThis JSON file is a list of dictionaries, each dictionary contains the following fields:\n\n- 'instruction': 'string', presenting the instruction linked to the element.\n- 'input': 'string', signifying the input details for the element.\n- 'output': 'string', indicating the output information for the element.\n\nWe used the following list of instructions for generating the dataset:", "## Citing this project\n\nIf you use this code in your research, please use the following BibTeX entry.", "## Feedback\n\nIf you have any feedback, please reach out at louisbrulenaudet@URL." ]
[ 125, 502, 100, 24, 21 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #french law #droit français #Code de la consommation #doi-10.57967/hf/1446 #region-us \n" ]
eb56aab0d9eecc8d9087901ab0f55fbe07ca91fe
### Dataset Summary This dataset consists of 2566 tweets and maintains a balanced distribution between cyberbullying and not cyberbullying. For every keyword or phrase, there is an annotated tweet labeled as cyberbullying that contains that word or phrase. The not cyberbullying category predominantly includes tweets that do not contain obscene words and are sourced from popular and varied discussions involving colombian users, reflecting a wide range of topics and conversations. The distribution of cyberbullying tweets and non-cyberbullying tweets was the same. The keywords and phrases used in the creation of the dataset were selected based on the categories provided in the article Guidelines for the Fine-Grained Analysis of Cyberbullying authored by Cynthia Van Hee, Ben Verhoeven, Els Lefever, Guy De Pauw, Walter Daelemans, and Véronique Hoste. Four categories were included: insult, threat, curse, and defamation. The insult category involves the use of offensive words intended to verbally hurt another person, while threat aims to harm the victim's integrity. Curse includes words that wish harm or misfortune upon a person, and defamation seeks to damage the victim’s reputation. These categories were chosen to capture a broad representation of the forms in which cyberbullying can manifest. The tweets were labeled by an occupational therapist associated with the project.
FelipeGuerra/Colombian_Spanish_Cyberbullying_Dataset_2
[ "license:mit", "region:us" ]
2023-12-11T19:39:35+00:00
{"license": "mit"}
2023-12-12T18:50:37+00:00
[]
[]
TAGS #license-mit #region-us
### Dataset Summary This dataset consists of 2566 tweets and maintains a balanced distribution between cyberbullying and not cyberbullying. For every keyword or phrase, there is an annotated tweet labeled as cyberbullying that contains that word or phrase. The not cyberbullying category predominantly includes tweets that do not contain obscene words and are sourced from popular and varied discussions involving colombian users, reflecting a wide range of topics and conversations. The distribution of cyberbullying tweets and non-cyberbullying tweets was the same. The keywords and phrases used in the creation of the dataset were selected based on the categories provided in the article Guidelines for the Fine-Grained Analysis of Cyberbullying authored by Cynthia Van Hee, Ben Verhoeven, Els Lefever, Guy De Pauw, Walter Daelemans, and Véronique Hoste. Four categories were included: insult, threat, curse, and defamation. The insult category involves the use of offensive words intended to verbally hurt another person, while threat aims to harm the victim's integrity. Curse includes words that wish harm or misfortune upon a person, and defamation seeks to damage the victim’s reputation. These categories were chosen to capture a broad representation of the forms in which cyberbullying can manifest. The tweets were labeled by an occupational therapist associated with the project.
[ "### Dataset Summary\n\nThis dataset consists of 2566 tweets and maintains a balanced distribution between cyberbullying and not cyberbullying. For every keyword or phrase, there is an annotated tweet labeled as cyberbullying that contains that word or phrase.\n\nThe not cyberbullying category predominantly includes tweets that do not contain obscene words and are sourced from popular and varied discussions involving colombian users, reflecting a wide range of topics and conversations.\n\nThe distribution of cyberbullying tweets and non-cyberbullying tweets was the same. The keywords and phrases used in the creation of the dataset were selected based on the categories provided in the article Guidelines for the Fine-Grained Analysis of Cyberbullying authored by Cynthia Van Hee, Ben Verhoeven, Els Lefever, Guy De Pauw, Walter Daelemans, and Véronique Hoste. Four categories were included: insult, threat, curse, and defamation. The insult category involves the use of offensive words intended to verbally hurt another person, while threat aims to harm the victim's integrity. Curse includes words that wish harm or misfortune upon a person, and defamation seeks to damage the victim’s reputation. These categories were chosen to capture a broad representation of the forms in which cyberbullying can manifest. The tweets were labeled by an occupational therapist associated with the project." ]
[ "TAGS\n#license-mit #region-us \n", "### Dataset Summary\n\nThis dataset consists of 2566 tweets and maintains a balanced distribution between cyberbullying and not cyberbullying. For every keyword or phrase, there is an annotated tweet labeled as cyberbullying that contains that word or phrase.\n\nThe not cyberbullying category predominantly includes tweets that do not contain obscene words and are sourced from popular and varied discussions involving colombian users, reflecting a wide range of topics and conversations.\n\nThe distribution of cyberbullying tweets and non-cyberbullying tweets was the same. The keywords and phrases used in the creation of the dataset were selected based on the categories provided in the article Guidelines for the Fine-Grained Analysis of Cyberbullying authored by Cynthia Van Hee, Ben Verhoeven, Els Lefever, Guy De Pauw, Walter Daelemans, and Véronique Hoste. Four categories were included: insult, threat, curse, and defamation. The insult category involves the use of offensive words intended to verbally hurt another person, while threat aims to harm the victim's integrity. Curse includes words that wish harm or misfortune upon a person, and defamation seeks to damage the victim’s reputation. These categories were chosen to capture a broad representation of the forms in which cyberbullying can manifest. The tweets were labeled by an occupational therapist associated with the project." ]
[ 11, 331 ]
[ "passage: TAGS\n#license-mit #region-us \n### Dataset Summary\n\nThis dataset consists of 2566 tweets and maintains a balanced distribution between cyberbullying and not cyberbullying. For every keyword or phrase, there is an annotated tweet labeled as cyberbullying that contains that word or phrase.\n\nThe not cyberbullying category predominantly includes tweets that do not contain obscene words and are sourced from popular and varied discussions involving colombian users, reflecting a wide range of topics and conversations.\n\nThe distribution of cyberbullying tweets and non-cyberbullying tweets was the same. The keywords and phrases used in the creation of the dataset were selected based on the categories provided in the article Guidelines for the Fine-Grained Analysis of Cyberbullying authored by Cynthia Van Hee, Ben Verhoeven, Els Lefever, Guy De Pauw, Walter Daelemans, and Véronique Hoste. Four categories were included: insult, threat, curse, and defamation. The insult category involves the use of offensive words intended to verbally hurt another person, while threat aims to harm the victim's integrity. Curse includes words that wish harm or misfortune upon a person, and defamation seeks to damage the victim’s reputation. These categories were chosen to capture a broad representation of the forms in which cyberbullying can manifest. The tweets were labeled by an occupational therapist associated with the project." ]
c6074d453ebb525effc750edca60cbfebb77cbd9
Original Dataset from: https://huggingface.co/datasets/glue This dataset is adapted from https://huggingface.co/datasets/gmongaras/BERT_Base_Cased_512_GLUE Every split besides the ax split is in this dataset. Lines above 512 tokens from the BERT-cased (bert-base-cased) tokenizer are removed in the original dataset If in any case the sentences are longer than 512 tokens, they are subsetted. Original labels and dataset categories are retained.
gmongaras/BERT_Base_Cased_512_GLUE_Mapped
[ "region:us" ]
2023-12-11T20:34:01+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "label", "dtype": "float64"}, {"name": "dataset_name", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "token_type_ids", "sequence": "int8"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 232895922, "num_examples": 949728}, {"name": "validation", "num_bytes": 17255970, "num_examples": 69711}, {"name": "test", "num_bytes": 96102951, "num_examples": 425205}], "download_size": 123150665, "dataset_size": 346254843}}
2023-12-11T20:34:53+00:00
[]
[]
TAGS #region-us
Original Dataset from: URL This dataset is adapted from URL Every split besides the ax split is in this dataset. Lines above 512 tokens from the BERT-cased (bert-base-cased) tokenizer are removed in the original dataset If in any case the sentences are longer than 512 tokens, they are subsetted. Original labels and dataset categories are retained.
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
22d33393fefe6a2cb9f4cde5beebfba995a5576d
## Dataset Summary This is the training data of the model `Propositionizer-wiki`. We prompt GPT-4 to decompose a Wikipedia paragraph into a list of propositions. We propose this training data to explore the concept of propositions as retrieval units. The propositions are defined as follows: 1. Each proposition should correspond to a distinct piece of meaning in the text, where the composition of all propositions would represent the semantics of the entire text. 2. A proposition should be *minimal*, i.e. it cannot be further split into separate propositions. 3. A proposition should be *contextualized and self-contained* ([Choi et al. 2021](https://aclanthology.org/2021.tacl-1.27/)). A proposition should include all the necessary context from the text (e.g. coreference) to interpret its meaning. Check out more details in the paper. ## Dataset Structure Here we provide details about the structure of the dataset. * `sources` represents a Wikipedia paragraph. It is always in the format of "Title: {title}. Section: {section}. {content}". The title will not be empty, but the section can be empty. * `targets` are a list of propositions in a JSON-formatted string. Example: ``` { "sources": "Title: Leaning Tower of Pisa. Section: . Prior to restoration work performed between 1990 and 2001, the tower leaned at an angle of 5.5 degrees, but the tower now leans at about 3.99 degrees. This means the top of the Leaning Tower of Pisa is displaced horizontally 3.9 meters (12 ft 10 in) from the center." "targets": "[\"Prior to restoration work performed between 1990 and 2001, the Leaning Tower of Pisa leaned at an angle of 5.5 degrees.\", \"The Leaning Tower of Pisa now leans at about 3.99 degrees.\", \"The top of the Leaning Tower of Pisa is displaced horizontally 3.9 meters (12 ft 10 in) from the center.\"]" } ``` ## Citation ``` ```
chentong00/propositionizer-wiki-data
[ "task_categories:text2text-generation", "size_categories:10K<n<100K", "license:apache-2.0", "region:us" ]
2023-12-11T20:37:47+00:00
{"license": "apache-2.0", "size_categories": ["10K<n<100K"], "task_categories": ["text2text-generation"]}
2023-12-11T21:51:06+00:00
[]
[]
TAGS #task_categories-text2text-generation #size_categories-10K<n<100K #license-apache-2.0 #region-us
## Dataset Summary This is the training data of the model 'Propositionizer-wiki'. We prompt GPT-4 to decompose a Wikipedia paragraph into a list of propositions. We propose this training data to explore the concept of propositions as retrieval units. The propositions are defined as follows: 1. Each proposition should correspond to a distinct piece of meaning in the text, where the composition of all propositions would represent the semantics of the entire text. 2. A proposition should be *minimal*, i.e. it cannot be further split into separate propositions. 3. A proposition should be *contextualized and self-contained* (Choi et al. 2021). A proposition should include all the necessary context from the text (e.g. coreference) to interpret its meaning. Check out more details in the paper. ## Dataset Structure Here we provide details about the structure of the dataset. * 'sources' represents a Wikipedia paragraph. It is always in the format of "Title: {title}. Section: {section}. {content}". The title will not be empty, but the section can be empty. * 'targets' are a list of propositions in a JSON-formatted string. Example:
[ "## Dataset Summary\n\nThis is the training data of the model 'Propositionizer-wiki'. We prompt GPT-4 to decompose a Wikipedia paragraph into a list of propositions. \n\nWe propose this training data to explore the concept of propositions as retrieval units. The propositions are defined as follows:\n1. Each proposition should correspond to a distinct piece of meaning in the text, where the composition of all propositions would represent the semantics of the entire text.\n2. A proposition should be *minimal*, i.e. it cannot be further split into separate propositions.\n3. A proposition should be *contextualized and self-contained* (Choi et al. 2021). A proposition should include all the necessary context from the text (e.g. coreference) to interpret its meaning.\n\nCheck out more details in the paper.", "## Dataset Structure\n\nHere we provide details about the structure of the dataset. \n\n* 'sources' represents a Wikipedia paragraph. It is always in the format of \"Title: {title}. Section: {section}. {content}\". The title will not be empty, but the section can be empty.\n* 'targets' are a list of propositions in a JSON-formatted string.\n\nExample:" ]
[ "TAGS\n#task_categories-text2text-generation #size_categories-10K<n<100K #license-apache-2.0 #region-us \n", "## Dataset Summary\n\nThis is the training data of the model 'Propositionizer-wiki'. We prompt GPT-4 to decompose a Wikipedia paragraph into a list of propositions. \n\nWe propose this training data to explore the concept of propositions as retrieval units. The propositions are defined as follows:\n1. Each proposition should correspond to a distinct piece of meaning in the text, where the composition of all propositions would represent the semantics of the entire text.\n2. A proposition should be *minimal*, i.e. it cannot be further split into separate propositions.\n3. A proposition should be *contextualized and self-contained* (Choi et al. 2021). A proposition should include all the necessary context from the text (e.g. coreference) to interpret its meaning.\n\nCheck out more details in the paper.", "## Dataset Structure\n\nHere we provide details about the structure of the dataset. \n\n* 'sources' represents a Wikipedia paragraph. It is always in the format of \"Title: {title}. Section: {section}. {content}\". The title will not be empty, but the section can be empty.\n* 'targets' are a list of propositions in a JSON-formatted string.\n\nExample:" ]
[ 39, 182, 93 ]
[ "passage: TAGS\n#task_categories-text2text-generation #size_categories-10K<n<100K #license-apache-2.0 #region-us \n## Dataset Summary\n\nThis is the training data of the model 'Propositionizer-wiki'. We prompt GPT-4 to decompose a Wikipedia paragraph into a list of propositions. \n\nWe propose this training data to explore the concept of propositions as retrieval units. The propositions are defined as follows:\n1. Each proposition should correspond to a distinct piece of meaning in the text, where the composition of all propositions would represent the semantics of the entire text.\n2. A proposition should be *minimal*, i.e. it cannot be further split into separate propositions.\n3. A proposition should be *contextualized and self-contained* (Choi et al. 2021). A proposition should include all the necessary context from the text (e.g. coreference) to interpret its meaning.\n\nCheck out more details in the paper.## Dataset Structure\n\nHere we provide details about the structure of the dataset. \n\n* 'sources' represents a Wikipedia paragraph. It is always in the format of \"Title: {title}. Section: {section}. {content}\". The title will not be empty, but the section can be empty.\n* 'targets' are a list of propositions in a JSON-formatted string.\n\nExample:" ]
5a375b31b60b7d6604e8b400ee2070e346412d33
--- task_categories: - question-answering - summarization + - zero-shot-classification language: - en - el size_categories: - 10K<n<100K + ---
dimitristzel/Diploma
[ "region:us" ]
2023-12-11T22:00:53+00:00
{}
2023-12-21T19:26:16+00:00
[]
[]
TAGS #region-us
--- task_categories: - question-answering - summarization + - zero-shot-classification language: - en - el size_categories: - 10K<n<100K + ---
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
c364b3ed81dfc0f1fbb9e231f4ef1f6c7a3b42e3
# Dataset Card for Evaluation run of v1olet/v1olet_marcoroni-go-bruins-merge-7B <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [v1olet/v1olet_marcoroni-go-bruins-merge-7B](https://huggingface.co/v1olet/v1olet_marcoroni-go-bruins-merge-7B) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_v1olet__v1olet_marcoroni-go-bruins-merge-7B", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-11T22:20:05.133817](https://huggingface.co/datasets/open-llm-leaderboard/details_v1olet__v1olet_marcoroni-go-bruins-merge-7B/blob/main/results_2023-12-11T22-20-05.133817.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.6562832889581685, "acc_stderr": 0.031992742713714516, "acc_norm": 0.6562130552581822, "acc_norm_stderr": 0.032653591602583326, "mc1": 0.44430844553243576, "mc1_stderr": 0.017394586250743173, "mc2": 0.6142182914346945, "mc2_stderr": 0.015342703381935975 }, "harness|arc:challenge|25": { "acc": 0.6689419795221843, "acc_stderr": 0.013752062419817837, "acc_norm": 0.7005119453924915, "acc_norm_stderr": 0.013385021637313577 }, "harness|hellaswag|10": { "acc": 0.6898028281218881, "acc_stderr": 0.004616288245259755, "acc_norm": 0.8717386974706234, "acc_norm_stderr": 0.0033369715351311744 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.31, "acc_stderr": 0.04648231987117316, "acc_norm": 0.31, "acc_norm_stderr": 0.04648231987117316 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.6592592592592592, "acc_stderr": 0.04094376269996792, "acc_norm": 0.6592592592592592, "acc_norm_stderr": 0.04094376269996792 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.6907894736842105, "acc_stderr": 0.037610708698674805, "acc_norm": 0.6907894736842105, "acc_norm_stderr": 0.037610708698674805 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.67, "acc_stderr": 0.04725815626252609, "acc_norm": 0.67, "acc_norm_stderr": 0.04725815626252609 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.7245283018867924, "acc_stderr": 0.027495663683724057, "acc_norm": 0.7245283018867924, "acc_norm_stderr": 0.027495663683724057 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.7638888888888888, "acc_stderr": 0.03551446610810826, "acc_norm": 0.7638888888888888, "acc_norm_stderr": 0.03551446610810826 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.47, "acc_stderr": 0.050161355804659205, "acc_norm": 0.47, "acc_norm_stderr": 0.050161355804659205 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.56, "acc_stderr": 0.04988876515698589, "acc_norm": 0.56, "acc_norm_stderr": 0.04988876515698589 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.35, "acc_stderr": 0.047937248544110196, "acc_norm": 0.35, "acc_norm_stderr": 0.047937248544110196 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.6589595375722543, "acc_stderr": 0.036146654241808254, "acc_norm": 0.6589595375722543, "acc_norm_stderr": 0.036146654241808254 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.43137254901960786, "acc_stderr": 0.04928099597287534, "acc_norm": 0.43137254901960786, "acc_norm_stderr": 0.04928099597287534 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.76, "acc_stderr": 0.04292346959909282, "acc_norm": 0.76, "acc_norm_stderr": 0.04292346959909282 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.6, "acc_stderr": 0.03202563076101735, "acc_norm": 0.6, "acc_norm_stderr": 0.03202563076101735 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.49122807017543857, "acc_stderr": 0.04702880432049615, "acc_norm": 0.49122807017543857, "acc_norm_stderr": 0.04702880432049615 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.5310344827586206, "acc_stderr": 0.04158632762097828, "acc_norm": 0.5310344827586206, "acc_norm_stderr": 0.04158632762097828 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.4365079365079365, "acc_stderr": 0.0255428468174005, "acc_norm": 0.4365079365079365, "acc_norm_stderr": 0.0255428468174005 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.5, "acc_stderr": 0.04472135954999579, "acc_norm": 0.5, "acc_norm_stderr": 0.04472135954999579 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.35, "acc_stderr": 0.047937248544110196, "acc_norm": 0.35, "acc_norm_stderr": 0.047937248544110196 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.7677419354838709, "acc_stderr": 0.024022256130308235, "acc_norm": 0.7677419354838709, "acc_norm_stderr": 0.024022256130308235 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.49261083743842365, "acc_stderr": 0.035176035403610084, "acc_norm": 0.49261083743842365, "acc_norm_stderr": 0.035176035403610084 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.71, "acc_stderr": 0.045604802157206845, "acc_norm": 0.71, "acc_norm_stderr": 0.045604802157206845 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.7878787878787878, "acc_stderr": 0.031922715695483, "acc_norm": 0.7878787878787878, "acc_norm_stderr": 0.031922715695483 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.7878787878787878, "acc_stderr": 0.029126522834586815, "acc_norm": 0.7878787878787878, "acc_norm_stderr": 0.029126522834586815 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.9015544041450777, "acc_stderr": 0.02150024957603348, "acc_norm": 0.9015544041450777, "acc_norm_stderr": 0.02150024957603348 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.6794871794871795, "acc_stderr": 0.02366129639396428, "acc_norm": 0.6794871794871795, "acc_norm_stderr": 0.02366129639396428 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.34444444444444444, "acc_stderr": 0.02897264888484427, "acc_norm": 0.34444444444444444, "acc_norm_stderr": 0.02897264888484427 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.680672268907563, "acc_stderr": 0.030283995525884396, "acc_norm": 0.680672268907563, "acc_norm_stderr": 0.030283995525884396 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.3443708609271523, "acc_stderr": 0.038796870240733264, "acc_norm": 0.3443708609271523, "acc_norm_stderr": 0.038796870240733264 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.8495412844036697, "acc_stderr": 0.015328563932669235, "acc_norm": 0.8495412844036697, "acc_norm_stderr": 0.015328563932669235 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.5138888888888888, "acc_stderr": 0.03408655867977749, "acc_norm": 0.5138888888888888, "acc_norm_stderr": 0.03408655867977749 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.8333333333333334, "acc_stderr": 0.026156867523931045, "acc_norm": 0.8333333333333334, "acc_norm_stderr": 0.026156867523931045 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.8143459915611815, "acc_stderr": 0.025310495376944863, "acc_norm": 0.8143459915611815, "acc_norm_stderr": 0.025310495376944863 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.695067264573991, "acc_stderr": 0.030898610882477515, "acc_norm": 0.695067264573991, "acc_norm_stderr": 0.030898610882477515 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.8015267175572519, "acc_stderr": 0.034981493854624714, "acc_norm": 0.8015267175572519, "acc_norm_stderr": 0.034981493854624714 }, "harness|hendrycksTest-international_law|5": { "acc": 0.7768595041322314, "acc_stderr": 0.03800754475228732, "acc_norm": 0.7768595041322314, "acc_norm_stderr": 0.03800754475228732 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.8055555555555556, "acc_stderr": 0.038260763248848646, "acc_norm": 0.8055555555555556, "acc_norm_stderr": 0.038260763248848646 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.754601226993865, "acc_stderr": 0.03380939813943354, "acc_norm": 0.754601226993865, "acc_norm_stderr": 0.03380939813943354 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.4642857142857143, "acc_stderr": 0.04733667890053756, "acc_norm": 0.4642857142857143, "acc_norm_stderr": 0.04733667890053756 }, "harness|hendrycksTest-management|5": { "acc": 0.7766990291262136, "acc_stderr": 0.04123553189891431, "acc_norm": 0.7766990291262136, "acc_norm_stderr": 0.04123553189891431 }, "harness|hendrycksTest-marketing|5": { "acc": 0.8846153846153846, "acc_stderr": 0.02093019318517933, "acc_norm": 0.8846153846153846, "acc_norm_stderr": 0.02093019318517933 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.72, "acc_stderr": 0.045126085985421276, "acc_norm": 0.72, "acc_norm_stderr": 0.045126085985421276 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.8314176245210728, "acc_stderr": 0.013387895731543604, "acc_norm": 0.8314176245210728, "acc_norm_stderr": 0.013387895731543604 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.7369942196531792, "acc_stderr": 0.023703099525258172, "acc_norm": 0.7369942196531792, "acc_norm_stderr": 0.023703099525258172 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.41899441340782123, "acc_stderr": 0.016501579306861677, "acc_norm": 0.41899441340782123, "acc_norm_stderr": 0.016501579306861677 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.7287581699346405, "acc_stderr": 0.02545775669666788, "acc_norm": 0.7287581699346405, "acc_norm_stderr": 0.02545775669666788 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.7170418006430869, "acc_stderr": 0.02558306248998481, "acc_norm": 0.7170418006430869, "acc_norm_stderr": 0.02558306248998481 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.75, "acc_stderr": 0.02409347123262133, "acc_norm": 0.75, "acc_norm_stderr": 0.02409347123262133 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.48936170212765956, "acc_stderr": 0.02982074719142248, "acc_norm": 0.48936170212765956, "acc_norm_stderr": 0.02982074719142248 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.4706649282920469, "acc_stderr": 0.012748238397365549, "acc_norm": 0.4706649282920469, "acc_norm_stderr": 0.012748238397365549 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.6801470588235294, "acc_stderr": 0.02833295951403121, "acc_norm": 0.6801470588235294, "acc_norm_stderr": 0.02833295951403121 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.6830065359477124, "acc_stderr": 0.018824219512706207, "acc_norm": 0.6830065359477124, "acc_norm_stderr": 0.018824219512706207 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.6727272727272727, "acc_stderr": 0.0449429086625209, "acc_norm": 0.6727272727272727, "acc_norm_stderr": 0.0449429086625209 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.726530612244898, "acc_stderr": 0.028535560337128445, "acc_norm": 0.726530612244898, "acc_norm_stderr": 0.028535560337128445 }, "harness|hendrycksTest-sociology|5": { "acc": 0.8557213930348259, "acc_stderr": 0.024845753212306053, "acc_norm": 0.8557213930348259, "acc_norm_stderr": 0.024845753212306053 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.86, "acc_stderr": 0.0348735088019777, "acc_norm": 0.86, "acc_norm_stderr": 0.0348735088019777 }, "harness|hendrycksTest-virology|5": { "acc": 0.5421686746987951, "acc_stderr": 0.0387862677100236, "acc_norm": 0.5421686746987951, "acc_norm_stderr": 0.0387862677100236 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.8362573099415205, "acc_stderr": 0.028380919596145866, "acc_norm": 0.8362573099415205, "acc_norm_stderr": 0.028380919596145866 }, "harness|truthfulqa:mc|0": { "mc1": 0.44430844553243576, "mc1_stderr": 0.017394586250743173, "mc2": 0.6142182914346945, "mc2_stderr": 0.015342703381935975 }, "harness|winogrande|5": { "acc": 0.8145224940805051, "acc_stderr": 0.010923965303140505 }, "harness|gsm8k|5": { "acc": 0.7156937073540561, "acc_stderr": 0.012425078188395978 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
open-llm-leaderboard/details_v1olet__v1olet_marcoroni-go-bruins-merge-7B
[ "region:us" ]
2023-12-11T22:22:57+00:00
{"pretty_name": "Evaluation run of v1olet/v1olet_marcoroni-go-bruins-merge-7B", "dataset_summary": "Dataset automatically created during the evaluation run of model [v1olet/v1olet_marcoroni-go-bruins-merge-7B](https://huggingface.co/v1olet/v1olet_marcoroni-go-bruins-merge-7B) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_v1olet__v1olet_marcoroni-go-bruins-merge-7B\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-11T22:20:05.133817](https://huggingface.co/datasets/open-llm-leaderboard/details_v1olet__v1olet_marcoroni-go-bruins-merge-7B/blob/main/results_2023-12-11T22-20-05.133817.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.6562832889581685,\n \"acc_stderr\": 0.031992742713714516,\n \"acc_norm\": 0.6562130552581822,\n \"acc_norm_stderr\": 0.032653591602583326,\n \"mc1\": 0.44430844553243576,\n \"mc1_stderr\": 0.017394586250743173,\n \"mc2\": 0.6142182914346945,\n \"mc2_stderr\": 0.015342703381935975\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.6689419795221843,\n \"acc_stderr\": 0.013752062419817837,\n \"acc_norm\": 0.7005119453924915,\n \"acc_norm_stderr\": 0.013385021637313577\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6898028281218881,\n \"acc_stderr\": 0.004616288245259755,\n \"acc_norm\": 0.8717386974706234,\n \"acc_norm_stderr\": 0.0033369715351311744\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.31,\n \"acc_stderr\": 0.04648231987117316,\n \"acc_norm\": 0.31,\n \"acc_norm_stderr\": 0.04648231987117316\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.6592592592592592,\n \"acc_stderr\": 0.04094376269996792,\n \"acc_norm\": 0.6592592592592592,\n \"acc_norm_stderr\": 0.04094376269996792\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.6907894736842105,\n \"acc_stderr\": 0.037610708698674805,\n \"acc_norm\": 0.6907894736842105,\n \"acc_norm_stderr\": 0.037610708698674805\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.67,\n \"acc_stderr\": 0.04725815626252609,\n \"acc_norm\": 0.67,\n \"acc_norm_stderr\": 0.04725815626252609\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.7245283018867924,\n \"acc_stderr\": 0.027495663683724057,\n \"acc_norm\": 0.7245283018867924,\n \"acc_norm_stderr\": 0.027495663683724057\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.7638888888888888,\n \"acc_stderr\": 0.03551446610810826,\n \"acc_norm\": 0.7638888888888888,\n \"acc_norm_stderr\": 0.03551446610810826\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.47,\n \"acc_stderr\": 0.050161355804659205,\n \"acc_norm\": 0.47,\n \"acc_norm_stderr\": 0.050161355804659205\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.56,\n \"acc_stderr\": 0.04988876515698589,\n \"acc_norm\": 0.56,\n \"acc_norm_stderr\": 0.04988876515698589\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.35,\n \"acc_stderr\": 0.047937248544110196,\n \"acc_norm\": 0.35,\n \"acc_norm_stderr\": 0.047937248544110196\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.6589595375722543,\n \"acc_stderr\": 0.036146654241808254,\n \"acc_norm\": 0.6589595375722543,\n \"acc_norm_stderr\": 0.036146654241808254\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.43137254901960786,\n \"acc_stderr\": 0.04928099597287534,\n \"acc_norm\": 0.43137254901960786,\n \"acc_norm_stderr\": 0.04928099597287534\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.76,\n \"acc_stderr\": 0.04292346959909282,\n \"acc_norm\": 0.76,\n \"acc_norm_stderr\": 0.04292346959909282\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.6,\n \"acc_stderr\": 0.03202563076101735,\n \"acc_norm\": 0.6,\n \"acc_norm_stderr\": 0.03202563076101735\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.49122807017543857,\n \"acc_stderr\": 0.04702880432049615,\n \"acc_norm\": 0.49122807017543857,\n \"acc_norm_stderr\": 0.04702880432049615\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.5310344827586206,\n \"acc_stderr\": 0.04158632762097828,\n \"acc_norm\": 0.5310344827586206,\n \"acc_norm_stderr\": 0.04158632762097828\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.4365079365079365,\n \"acc_stderr\": 0.0255428468174005,\n \"acc_norm\": 0.4365079365079365,\n \"acc_norm_stderr\": 0.0255428468174005\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.5,\n \"acc_stderr\": 0.04472135954999579,\n \"acc_norm\": 0.5,\n \"acc_norm_stderr\": 0.04472135954999579\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.35,\n \"acc_stderr\": 0.047937248544110196,\n \"acc_norm\": 0.35,\n \"acc_norm_stderr\": 0.047937248544110196\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.7677419354838709,\n \"acc_stderr\": 0.024022256130308235,\n \"acc_norm\": 0.7677419354838709,\n \"acc_norm_stderr\": 0.024022256130308235\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.49261083743842365,\n \"acc_stderr\": 0.035176035403610084,\n \"acc_norm\": 0.49261083743842365,\n \"acc_norm_stderr\": 0.035176035403610084\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.71,\n \"acc_stderr\": 0.045604802157206845,\n \"acc_norm\": 0.71,\n \"acc_norm_stderr\": 0.045604802157206845\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.7878787878787878,\n \"acc_stderr\": 0.031922715695483,\n \"acc_norm\": 0.7878787878787878,\n \"acc_norm_stderr\": 0.031922715695483\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.7878787878787878,\n \"acc_stderr\": 0.029126522834586815,\n \"acc_norm\": 0.7878787878787878,\n \"acc_norm_stderr\": 0.029126522834586815\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.9015544041450777,\n \"acc_stderr\": 0.02150024957603348,\n \"acc_norm\": 0.9015544041450777,\n \"acc_norm_stderr\": 0.02150024957603348\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.6794871794871795,\n \"acc_stderr\": 0.02366129639396428,\n \"acc_norm\": 0.6794871794871795,\n \"acc_norm_stderr\": 0.02366129639396428\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.34444444444444444,\n \"acc_stderr\": 0.02897264888484427,\n \"acc_norm\": 0.34444444444444444,\n \"acc_norm_stderr\": 0.02897264888484427\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.680672268907563,\n \"acc_stderr\": 0.030283995525884396,\n \"acc_norm\": 0.680672268907563,\n \"acc_norm_stderr\": 0.030283995525884396\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.3443708609271523,\n \"acc_stderr\": 0.038796870240733264,\n \"acc_norm\": 0.3443708609271523,\n \"acc_norm_stderr\": 0.038796870240733264\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.8495412844036697,\n \"acc_stderr\": 0.015328563932669235,\n \"acc_norm\": 0.8495412844036697,\n \"acc_norm_stderr\": 0.015328563932669235\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.5138888888888888,\n \"acc_stderr\": 0.03408655867977749,\n \"acc_norm\": 0.5138888888888888,\n \"acc_norm_stderr\": 0.03408655867977749\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.8333333333333334,\n \"acc_stderr\": 0.026156867523931045,\n \"acc_norm\": 0.8333333333333334,\n \"acc_norm_stderr\": 0.026156867523931045\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.8143459915611815,\n \"acc_stderr\": 0.025310495376944863,\n \"acc_norm\": 0.8143459915611815,\n \"acc_norm_stderr\": 0.025310495376944863\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.695067264573991,\n \"acc_stderr\": 0.030898610882477515,\n \"acc_norm\": 0.695067264573991,\n \"acc_norm_stderr\": 0.030898610882477515\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.8015267175572519,\n \"acc_stderr\": 0.034981493854624714,\n \"acc_norm\": 0.8015267175572519,\n \"acc_norm_stderr\": 0.034981493854624714\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.7768595041322314,\n \"acc_stderr\": 0.03800754475228732,\n \"acc_norm\": 0.7768595041322314,\n \"acc_norm_stderr\": 0.03800754475228732\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.8055555555555556,\n \"acc_stderr\": 0.038260763248848646,\n \"acc_norm\": 0.8055555555555556,\n \"acc_norm_stderr\": 0.038260763248848646\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.754601226993865,\n \"acc_stderr\": 0.03380939813943354,\n \"acc_norm\": 0.754601226993865,\n \"acc_norm_stderr\": 0.03380939813943354\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.4642857142857143,\n \"acc_stderr\": 0.04733667890053756,\n \"acc_norm\": 0.4642857142857143,\n \"acc_norm_stderr\": 0.04733667890053756\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.7766990291262136,\n \"acc_stderr\": 0.04123553189891431,\n \"acc_norm\": 0.7766990291262136,\n \"acc_norm_stderr\": 0.04123553189891431\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.8846153846153846,\n \"acc_stderr\": 0.02093019318517933,\n \"acc_norm\": 0.8846153846153846,\n \"acc_norm_stderr\": 0.02093019318517933\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.72,\n \"acc_stderr\": 0.045126085985421276,\n \"acc_norm\": 0.72,\n \"acc_norm_stderr\": 0.045126085985421276\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.8314176245210728,\n \"acc_stderr\": 0.013387895731543604,\n \"acc_norm\": 0.8314176245210728,\n \"acc_norm_stderr\": 0.013387895731543604\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.7369942196531792,\n \"acc_stderr\": 0.023703099525258172,\n \"acc_norm\": 0.7369942196531792,\n \"acc_norm_stderr\": 0.023703099525258172\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.41899441340782123,\n \"acc_stderr\": 0.016501579306861677,\n \"acc_norm\": 0.41899441340782123,\n \"acc_norm_stderr\": 0.016501579306861677\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.7287581699346405,\n \"acc_stderr\": 0.02545775669666788,\n \"acc_norm\": 0.7287581699346405,\n \"acc_norm_stderr\": 0.02545775669666788\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.7170418006430869,\n \"acc_stderr\": 0.02558306248998481,\n \"acc_norm\": 0.7170418006430869,\n \"acc_norm_stderr\": 0.02558306248998481\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.75,\n \"acc_stderr\": 0.02409347123262133,\n \"acc_norm\": 0.75,\n \"acc_norm_stderr\": 0.02409347123262133\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.48936170212765956,\n \"acc_stderr\": 0.02982074719142248,\n \"acc_norm\": 0.48936170212765956,\n \"acc_norm_stderr\": 0.02982074719142248\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.4706649282920469,\n \"acc_stderr\": 0.012748238397365549,\n \"acc_norm\": 0.4706649282920469,\n \"acc_norm_stderr\": 0.012748238397365549\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.6801470588235294,\n \"acc_stderr\": 0.02833295951403121,\n \"acc_norm\": 0.6801470588235294,\n \"acc_norm_stderr\": 0.02833295951403121\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.6830065359477124,\n \"acc_stderr\": 0.018824219512706207,\n \"acc_norm\": 0.6830065359477124,\n \"acc_norm_stderr\": 0.018824219512706207\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.6727272727272727,\n \"acc_stderr\": 0.0449429086625209,\n \"acc_norm\": 0.6727272727272727,\n \"acc_norm_stderr\": 0.0449429086625209\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.726530612244898,\n \"acc_stderr\": 0.028535560337128445,\n \"acc_norm\": 0.726530612244898,\n \"acc_norm_stderr\": 0.028535560337128445\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.8557213930348259,\n \"acc_stderr\": 0.024845753212306053,\n \"acc_norm\": 0.8557213930348259,\n \"acc_norm_stderr\": 0.024845753212306053\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.86,\n \"acc_stderr\": 0.0348735088019777,\n \"acc_norm\": 0.86,\n \"acc_norm_stderr\": 0.0348735088019777\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.5421686746987951,\n \"acc_stderr\": 0.0387862677100236,\n \"acc_norm\": 0.5421686746987951,\n \"acc_norm_stderr\": 0.0387862677100236\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.8362573099415205,\n \"acc_stderr\": 0.028380919596145866,\n \"acc_norm\": 0.8362573099415205,\n \"acc_norm_stderr\": 0.028380919596145866\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.44430844553243576,\n \"mc1_stderr\": 0.017394586250743173,\n \"mc2\": 0.6142182914346945,\n \"mc2_stderr\": 0.015342703381935975\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.8145224940805051,\n \"acc_stderr\": 0.010923965303140505\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.7156937073540561,\n \"acc_stderr\": 0.012425078188395978\n }\n}\n```", "repo_url": "https://huggingface.co/v1olet/v1olet_marcoroni-go-bruins-merge-7B", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|arc:challenge|25_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|gsm8k|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hellaswag|10_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-11T22-20-05.133817.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["**/details_harness|winogrande|5_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-11T22-20-05.133817.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_11T22_20_05.133817", "path": ["results_2023-12-11T22-20-05.133817.parquet"]}, {"split": "latest", "path": ["results_2023-12-11T22-20-05.133817.parquet"]}]}]}
2023-12-11T22:23:42+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of v1olet/v1olet_marcoroni-go-bruins-merge-7B Dataset automatically created during the evaluation run of model v1olet/v1olet_marcoroni-go-bruins-merge-7B on the Open LLM Leaderboard. The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-11T22:20:05.133817(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Evaluation run of v1olet/v1olet_marcoroni-go-bruins-merge-7B\n\n\n\nDataset automatically created during the evaluation run of model v1olet/v1olet_marcoroni-go-bruins-merge-7B on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-11T22:20:05.133817(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of v1olet/v1olet_marcoroni-go-bruins-merge-7B\n\n\n\nDataset automatically created during the evaluation run of model v1olet/v1olet_marcoroni-go-bruins-merge-7B on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-11T22:20:05.133817(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 207, 67, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of v1olet/v1olet_marcoroni-go-bruins-merge-7B\n\n\n\nDataset automatically created during the evaluation run of model v1olet/v1olet_marcoroni-go-bruins-merge-7B on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-11T22:20:05.133817(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]" ]
629e12e224cbab2a355b645198d66ac4e5e7fb91
# Dataset Card for Evaluation run of Toten5/Marcoroni-v3-neural-chat-v3-3-Slerp <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [Toten5/Marcoroni-v3-neural-chat-v3-3-Slerp](https://huggingface.co/Toten5/Marcoroni-v3-neural-chat-v3-3-Slerp) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_Toten5__Marcoroni-v3-neural-chat-v3-3-Slerp", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-11T22:21:10.174265](https://huggingface.co/datasets/open-llm-leaderboard/details_Toten5__Marcoroni-v3-neural-chat-v3-3-Slerp/blob/main/results_2023-12-11T22-21-10.174265.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.6497954997030103, "acc_stderr": 0.03218797050617161, "acc_norm": 0.6495568440119162, "acc_norm_stderr": 0.032855200604616566, "mc1": 0.47613219094247244, "mc1_stderr": 0.017483547156961574, "mc2": 0.6270127709181503, "mc2_stderr": 0.015065515223932825 }, "harness|arc:challenge|25": { "acc": 0.6621160409556314, "acc_stderr": 0.013822047922283512, "acc_norm": 0.6877133105802048, "acc_norm_stderr": 0.013542598541688067 }, "harness|hellaswag|10": { "acc": 0.6793467436765585, "acc_stderr": 0.004657738398900938, "acc_norm": 0.8654650468034256, "acc_norm_stderr": 0.003405288007233203 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.29, "acc_stderr": 0.045604802157206845, "acc_norm": 0.29, "acc_norm_stderr": 0.045604802157206845 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.6296296296296297, "acc_stderr": 0.041716541613545426, "acc_norm": 0.6296296296296297, "acc_norm_stderr": 0.041716541613545426 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.6907894736842105, "acc_stderr": 0.037610708698674805, "acc_norm": 0.6907894736842105, "acc_norm_stderr": 0.037610708698674805 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.63, "acc_stderr": 0.04852365870939099, "acc_norm": 0.63, "acc_norm_stderr": 0.04852365870939099 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.7056603773584905, "acc_stderr": 0.028049186315695248, "acc_norm": 0.7056603773584905, "acc_norm_stderr": 0.028049186315695248 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.7569444444444444, "acc_stderr": 0.03586879280080341, "acc_norm": 0.7569444444444444, "acc_norm_stderr": 0.03586879280080341 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.53, "acc_stderr": 0.050161355804659205, "acc_norm": 0.53, "acc_norm_stderr": 0.050161355804659205 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.58, "acc_stderr": 0.049604496374885836, "acc_norm": 0.58, "acc_norm_stderr": 0.049604496374885836 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.33, "acc_stderr": 0.047258156262526045, "acc_norm": 0.33, "acc_norm_stderr": 0.047258156262526045 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.6763005780346821, "acc_stderr": 0.035676037996391706, "acc_norm": 0.6763005780346821, "acc_norm_stderr": 0.035676037996391706 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.49019607843137253, "acc_stderr": 0.04974229460422817, "acc_norm": 0.49019607843137253, "acc_norm_stderr": 0.04974229460422817 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.75, "acc_stderr": 0.04351941398892446, "acc_norm": 0.75, "acc_norm_stderr": 0.04351941398892446 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.5914893617021276, "acc_stderr": 0.032134180267015755, "acc_norm": 0.5914893617021276, "acc_norm_stderr": 0.032134180267015755 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.5, "acc_stderr": 0.047036043419179864, "acc_norm": 0.5, "acc_norm_stderr": 0.047036043419179864 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.5379310344827586, "acc_stderr": 0.04154659671707548, "acc_norm": 0.5379310344827586, "acc_norm_stderr": 0.04154659671707548 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.42592592592592593, "acc_stderr": 0.025467149045469557, "acc_norm": 0.42592592592592593, "acc_norm_stderr": 0.025467149045469557 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.4444444444444444, "acc_stderr": 0.044444444444444495, "acc_norm": 0.4444444444444444, "acc_norm_stderr": 0.044444444444444495 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.35, "acc_stderr": 0.047937248544110196, "acc_norm": 0.35, "acc_norm_stderr": 0.047937248544110196 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.7709677419354839, "acc_stderr": 0.023904914311782655, "acc_norm": 0.7709677419354839, "acc_norm_stderr": 0.023904914311782655 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.49261083743842365, "acc_stderr": 0.035176035403610084, "acc_norm": 0.49261083743842365, "acc_norm_stderr": 0.035176035403610084 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.69, "acc_stderr": 0.04648231987117316, "acc_norm": 0.69, "acc_norm_stderr": 0.04648231987117316 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.7818181818181819, "acc_stderr": 0.03225078108306289, "acc_norm": 0.7818181818181819, "acc_norm_stderr": 0.03225078108306289 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.797979797979798, "acc_stderr": 0.028606204289229872, "acc_norm": 0.797979797979798, "acc_norm_stderr": 0.028606204289229872 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.8860103626943006, "acc_stderr": 0.022935144053919443, "acc_norm": 0.8860103626943006, "acc_norm_stderr": 0.022935144053919443 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.6641025641025641, "acc_stderr": 0.023946724741563976, "acc_norm": 0.6641025641025641, "acc_norm_stderr": 0.023946724741563976 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.3333333333333333, "acc_stderr": 0.028742040903948485, "acc_norm": 0.3333333333333333, "acc_norm_stderr": 0.028742040903948485 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.6722689075630253, "acc_stderr": 0.03048991141767323, "acc_norm": 0.6722689075630253, "acc_norm_stderr": 0.03048991141767323 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.304635761589404, "acc_stderr": 0.03757949922943343, "acc_norm": 0.304635761589404, "acc_norm_stderr": 0.03757949922943343 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.8458715596330275, "acc_stderr": 0.015480826865374307, "acc_norm": 0.8458715596330275, "acc_norm_stderr": 0.015480826865374307 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.5231481481481481, "acc_stderr": 0.03406315360711507, "acc_norm": 0.5231481481481481, "acc_norm_stderr": 0.03406315360711507 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.7990196078431373, "acc_stderr": 0.028125972265654373, "acc_norm": 0.7990196078431373, "acc_norm_stderr": 0.028125972265654373 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.7974683544303798, "acc_stderr": 0.026160568246601443, "acc_norm": 0.7974683544303798, "acc_norm_stderr": 0.026160568246601443 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.6771300448430493, "acc_stderr": 0.031381476375754995, "acc_norm": 0.6771300448430493, "acc_norm_stderr": 0.031381476375754995 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.7633587786259542, "acc_stderr": 0.03727673575596913, "acc_norm": 0.7633587786259542, "acc_norm_stderr": 0.03727673575596913 }, "harness|hendrycksTest-international_law|5": { "acc": 0.8016528925619835, "acc_stderr": 0.03640118271990946, "acc_norm": 0.8016528925619835, "acc_norm_stderr": 0.03640118271990946 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.7685185185185185, "acc_stderr": 0.04077494709252626, "acc_norm": 0.7685185185185185, "acc_norm_stderr": 0.04077494709252626 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.7730061349693251, "acc_stderr": 0.03291099578615769, "acc_norm": 0.7730061349693251, "acc_norm_stderr": 0.03291099578615769 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.4642857142857143, "acc_stderr": 0.04733667890053756, "acc_norm": 0.4642857142857143, "acc_norm_stderr": 0.04733667890053756 }, "harness|hendrycksTest-management|5": { "acc": 0.7766990291262136, "acc_stderr": 0.04123553189891431, "acc_norm": 0.7766990291262136, "acc_norm_stderr": 0.04123553189891431 }, "harness|hendrycksTest-marketing|5": { "acc": 0.8846153846153846, "acc_stderr": 0.02093019318517933, "acc_norm": 0.8846153846153846, "acc_norm_stderr": 0.02093019318517933 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.7, "acc_stderr": 0.046056618647183814, "acc_norm": 0.7, "acc_norm_stderr": 0.046056618647183814 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.8275862068965517, "acc_stderr": 0.013507943909371802, "acc_norm": 0.8275862068965517, "acc_norm_stderr": 0.013507943909371802 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.7427745664739884, "acc_stderr": 0.023532925431044287, "acc_norm": 0.7427745664739884, "acc_norm_stderr": 0.023532925431044287 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.42681564245810055, "acc_stderr": 0.016542401954631917, "acc_norm": 0.42681564245810055, "acc_norm_stderr": 0.016542401954631917 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.7189542483660131, "acc_stderr": 0.025738854797818737, "acc_norm": 0.7189542483660131, "acc_norm_stderr": 0.025738854797818737 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.7009646302250804, "acc_stderr": 0.02600330111788514, "acc_norm": 0.7009646302250804, "acc_norm_stderr": 0.02600330111788514 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.7314814814814815, "acc_stderr": 0.024659685185967287, "acc_norm": 0.7314814814814815, "acc_norm_stderr": 0.024659685185967287 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.46808510638297873, "acc_stderr": 0.029766675075873866, "acc_norm": 0.46808510638297873, "acc_norm_stderr": 0.029766675075873866 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.45371577574967403, "acc_stderr": 0.012715404841277738, "acc_norm": 0.45371577574967403, "acc_norm_stderr": 0.012715404841277738 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.6764705882352942, "acc_stderr": 0.02841820861940676, "acc_norm": 0.6764705882352942, "acc_norm_stderr": 0.02841820861940676 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.6633986928104575, "acc_stderr": 0.019117213911495144, "acc_norm": 0.6633986928104575, "acc_norm_stderr": 0.019117213911495144 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.6818181818181818, "acc_stderr": 0.04461272175910509, "acc_norm": 0.6818181818181818, "acc_norm_stderr": 0.04461272175910509 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.763265306122449, "acc_stderr": 0.02721283588407316, "acc_norm": 0.763265306122449, "acc_norm_stderr": 0.02721283588407316 }, "harness|hendrycksTest-sociology|5": { "acc": 0.8507462686567164, "acc_stderr": 0.025196929874827075, "acc_norm": 0.8507462686567164, "acc_norm_stderr": 0.025196929874827075 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.85, "acc_stderr": 0.0358870281282637, "acc_norm": 0.85, "acc_norm_stderr": 0.0358870281282637 }, "harness|hendrycksTest-virology|5": { "acc": 0.5180722891566265, "acc_stderr": 0.03889951252827216, "acc_norm": 0.5180722891566265, "acc_norm_stderr": 0.03889951252827216 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.8187134502923976, "acc_stderr": 0.029547741687640038, "acc_norm": 0.8187134502923976, "acc_norm_stderr": 0.029547741687640038 }, "harness|truthfulqa:mc|0": { "mc1": 0.47613219094247244, "mc1_stderr": 0.017483547156961574, "mc2": 0.6270127709181503, "mc2_stderr": 0.015065515223932825 }, "harness|winogrande|5": { "acc": 0.8074191002367798, "acc_stderr": 0.011082538847491906 }, "harness|gsm8k|5": { "acc": 0.7179681576952237, "acc_stderr": 0.012394926584335688 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
open-llm-leaderboard/details_Toten5__Marcoroni-v3-neural-chat-v3-3-Slerp
[ "region:us" ]
2023-12-11T22:24:06+00:00
{"pretty_name": "Evaluation run of Toten5/Marcoroni-v3-neural-chat-v3-3-Slerp", "dataset_summary": "Dataset automatically created during the evaluation run of model [Toten5/Marcoroni-v3-neural-chat-v3-3-Slerp](https://huggingface.co/Toten5/Marcoroni-v3-neural-chat-v3-3-Slerp) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_Toten5__Marcoroni-v3-neural-chat-v3-3-Slerp\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-11T22:21:10.174265](https://huggingface.co/datasets/open-llm-leaderboard/details_Toten5__Marcoroni-v3-neural-chat-v3-3-Slerp/blob/main/results_2023-12-11T22-21-10.174265.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.6497954997030103,\n \"acc_stderr\": 0.03218797050617161,\n \"acc_norm\": 0.6495568440119162,\n \"acc_norm_stderr\": 0.032855200604616566,\n \"mc1\": 0.47613219094247244,\n \"mc1_stderr\": 0.017483547156961574,\n \"mc2\": 0.6270127709181503,\n \"mc2_stderr\": 0.015065515223932825\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.6621160409556314,\n \"acc_stderr\": 0.013822047922283512,\n \"acc_norm\": 0.6877133105802048,\n \"acc_norm_stderr\": 0.013542598541688067\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6793467436765585,\n \"acc_stderr\": 0.004657738398900938,\n \"acc_norm\": 0.8654650468034256,\n \"acc_norm_stderr\": 0.003405288007233203\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.29,\n \"acc_stderr\": 0.045604802157206845,\n \"acc_norm\": 0.29,\n \"acc_norm_stderr\": 0.045604802157206845\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.6296296296296297,\n \"acc_stderr\": 0.041716541613545426,\n \"acc_norm\": 0.6296296296296297,\n \"acc_norm_stderr\": 0.041716541613545426\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.6907894736842105,\n \"acc_stderr\": 0.037610708698674805,\n \"acc_norm\": 0.6907894736842105,\n \"acc_norm_stderr\": 0.037610708698674805\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.63,\n \"acc_stderr\": 0.04852365870939099,\n \"acc_norm\": 0.63,\n \"acc_norm_stderr\": 0.04852365870939099\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.7056603773584905,\n \"acc_stderr\": 0.028049186315695248,\n \"acc_norm\": 0.7056603773584905,\n \"acc_norm_stderr\": 0.028049186315695248\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.7569444444444444,\n \"acc_stderr\": 0.03586879280080341,\n \"acc_norm\": 0.7569444444444444,\n \"acc_norm_stderr\": 0.03586879280080341\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.53,\n \"acc_stderr\": 0.050161355804659205,\n \"acc_norm\": 0.53,\n \"acc_norm_stderr\": 0.050161355804659205\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.58,\n \"acc_stderr\": 0.049604496374885836,\n \"acc_norm\": 0.58,\n \"acc_norm_stderr\": 0.049604496374885836\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.33,\n \"acc_stderr\": 0.047258156262526045,\n \"acc_norm\": 0.33,\n \"acc_norm_stderr\": 0.047258156262526045\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.6763005780346821,\n \"acc_stderr\": 0.035676037996391706,\n \"acc_norm\": 0.6763005780346821,\n \"acc_norm_stderr\": 0.035676037996391706\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.49019607843137253,\n \"acc_stderr\": 0.04974229460422817,\n \"acc_norm\": 0.49019607843137253,\n \"acc_norm_stderr\": 0.04974229460422817\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.75,\n \"acc_stderr\": 0.04351941398892446,\n \"acc_norm\": 0.75,\n \"acc_norm_stderr\": 0.04351941398892446\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.5914893617021276,\n \"acc_stderr\": 0.032134180267015755,\n \"acc_norm\": 0.5914893617021276,\n \"acc_norm_stderr\": 0.032134180267015755\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.5,\n \"acc_stderr\": 0.047036043419179864,\n \"acc_norm\": 0.5,\n \"acc_norm_stderr\": 0.047036043419179864\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.5379310344827586,\n \"acc_stderr\": 0.04154659671707548,\n \"acc_norm\": 0.5379310344827586,\n \"acc_norm_stderr\": 0.04154659671707548\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.42592592592592593,\n \"acc_stderr\": 0.025467149045469557,\n \"acc_norm\": 0.42592592592592593,\n \"acc_norm_stderr\": 0.025467149045469557\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.4444444444444444,\n \"acc_stderr\": 0.044444444444444495,\n \"acc_norm\": 0.4444444444444444,\n \"acc_norm_stderr\": 0.044444444444444495\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.35,\n \"acc_stderr\": 0.047937248544110196,\n \"acc_norm\": 0.35,\n \"acc_norm_stderr\": 0.047937248544110196\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.7709677419354839,\n \"acc_stderr\": 0.023904914311782655,\n \"acc_norm\": 0.7709677419354839,\n \"acc_norm_stderr\": 0.023904914311782655\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.49261083743842365,\n \"acc_stderr\": 0.035176035403610084,\n \"acc_norm\": 0.49261083743842365,\n \"acc_norm_stderr\": 0.035176035403610084\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.69,\n \"acc_stderr\": 0.04648231987117316,\n \"acc_norm\": 0.69,\n \"acc_norm_stderr\": 0.04648231987117316\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.7818181818181819,\n \"acc_stderr\": 0.03225078108306289,\n \"acc_norm\": 0.7818181818181819,\n \"acc_norm_stderr\": 0.03225078108306289\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.797979797979798,\n \"acc_stderr\": 0.028606204289229872,\n \"acc_norm\": 0.797979797979798,\n \"acc_norm_stderr\": 0.028606204289229872\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.8860103626943006,\n \"acc_stderr\": 0.022935144053919443,\n \"acc_norm\": 0.8860103626943006,\n \"acc_norm_stderr\": 0.022935144053919443\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.6641025641025641,\n \"acc_stderr\": 0.023946724741563976,\n \"acc_norm\": 0.6641025641025641,\n \"acc_norm_stderr\": 0.023946724741563976\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.3333333333333333,\n \"acc_stderr\": 0.028742040903948485,\n \"acc_norm\": 0.3333333333333333,\n \"acc_norm_stderr\": 0.028742040903948485\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.6722689075630253,\n \"acc_stderr\": 0.03048991141767323,\n \"acc_norm\": 0.6722689075630253,\n \"acc_norm_stderr\": 0.03048991141767323\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.304635761589404,\n \"acc_stderr\": 0.03757949922943343,\n \"acc_norm\": 0.304635761589404,\n \"acc_norm_stderr\": 0.03757949922943343\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.8458715596330275,\n \"acc_stderr\": 0.015480826865374307,\n \"acc_norm\": 0.8458715596330275,\n \"acc_norm_stderr\": 0.015480826865374307\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.5231481481481481,\n \"acc_stderr\": 0.03406315360711507,\n \"acc_norm\": 0.5231481481481481,\n \"acc_norm_stderr\": 0.03406315360711507\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.7990196078431373,\n \"acc_stderr\": 0.028125972265654373,\n \"acc_norm\": 0.7990196078431373,\n \"acc_norm_stderr\": 0.028125972265654373\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.7974683544303798,\n \"acc_stderr\": 0.026160568246601443,\n \"acc_norm\": 0.7974683544303798,\n \"acc_norm_stderr\": 0.026160568246601443\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.6771300448430493,\n \"acc_stderr\": 0.031381476375754995,\n \"acc_norm\": 0.6771300448430493,\n \"acc_norm_stderr\": 0.031381476375754995\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.7633587786259542,\n \"acc_stderr\": 0.03727673575596913,\n \"acc_norm\": 0.7633587786259542,\n \"acc_norm_stderr\": 0.03727673575596913\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.8016528925619835,\n \"acc_stderr\": 0.03640118271990946,\n \"acc_norm\": 0.8016528925619835,\n \"acc_norm_stderr\": 0.03640118271990946\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.7685185185185185,\n \"acc_stderr\": 0.04077494709252626,\n \"acc_norm\": 0.7685185185185185,\n \"acc_norm_stderr\": 0.04077494709252626\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.7730061349693251,\n \"acc_stderr\": 0.03291099578615769,\n \"acc_norm\": 0.7730061349693251,\n \"acc_norm_stderr\": 0.03291099578615769\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.4642857142857143,\n \"acc_stderr\": 0.04733667890053756,\n \"acc_norm\": 0.4642857142857143,\n \"acc_norm_stderr\": 0.04733667890053756\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.7766990291262136,\n \"acc_stderr\": 0.04123553189891431,\n \"acc_norm\": 0.7766990291262136,\n \"acc_norm_stderr\": 0.04123553189891431\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.8846153846153846,\n \"acc_stderr\": 0.02093019318517933,\n \"acc_norm\": 0.8846153846153846,\n \"acc_norm_stderr\": 0.02093019318517933\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.7,\n \"acc_stderr\": 0.046056618647183814,\n \"acc_norm\": 0.7,\n \"acc_norm_stderr\": 0.046056618647183814\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.8275862068965517,\n \"acc_stderr\": 0.013507943909371802,\n \"acc_norm\": 0.8275862068965517,\n \"acc_norm_stderr\": 0.013507943909371802\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.7427745664739884,\n \"acc_stderr\": 0.023532925431044287,\n \"acc_norm\": 0.7427745664739884,\n \"acc_norm_stderr\": 0.023532925431044287\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.42681564245810055,\n \"acc_stderr\": 0.016542401954631917,\n \"acc_norm\": 0.42681564245810055,\n \"acc_norm_stderr\": 0.016542401954631917\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.7189542483660131,\n \"acc_stderr\": 0.025738854797818737,\n \"acc_norm\": 0.7189542483660131,\n \"acc_norm_stderr\": 0.025738854797818737\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.7009646302250804,\n \"acc_stderr\": 0.02600330111788514,\n \"acc_norm\": 0.7009646302250804,\n \"acc_norm_stderr\": 0.02600330111788514\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.7314814814814815,\n \"acc_stderr\": 0.024659685185967287,\n \"acc_norm\": 0.7314814814814815,\n \"acc_norm_stderr\": 0.024659685185967287\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.46808510638297873,\n \"acc_stderr\": 0.029766675075873866,\n \"acc_norm\": 0.46808510638297873,\n \"acc_norm_stderr\": 0.029766675075873866\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.45371577574967403,\n \"acc_stderr\": 0.012715404841277738,\n \"acc_norm\": 0.45371577574967403,\n \"acc_norm_stderr\": 0.012715404841277738\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.6764705882352942,\n \"acc_stderr\": 0.02841820861940676,\n \"acc_norm\": 0.6764705882352942,\n \"acc_norm_stderr\": 0.02841820861940676\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.6633986928104575,\n \"acc_stderr\": 0.019117213911495144,\n \"acc_norm\": 0.6633986928104575,\n \"acc_norm_stderr\": 0.019117213911495144\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.6818181818181818,\n \"acc_stderr\": 0.04461272175910509,\n \"acc_norm\": 0.6818181818181818,\n \"acc_norm_stderr\": 0.04461272175910509\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.763265306122449,\n \"acc_stderr\": 0.02721283588407316,\n \"acc_norm\": 0.763265306122449,\n \"acc_norm_stderr\": 0.02721283588407316\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.8507462686567164,\n \"acc_stderr\": 0.025196929874827075,\n \"acc_norm\": 0.8507462686567164,\n \"acc_norm_stderr\": 0.025196929874827075\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.85,\n \"acc_stderr\": 0.0358870281282637,\n \"acc_norm\": 0.85,\n \"acc_norm_stderr\": 0.0358870281282637\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.5180722891566265,\n \"acc_stderr\": 0.03889951252827216,\n \"acc_norm\": 0.5180722891566265,\n \"acc_norm_stderr\": 0.03889951252827216\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.8187134502923976,\n \"acc_stderr\": 0.029547741687640038,\n \"acc_norm\": 0.8187134502923976,\n \"acc_norm_stderr\": 0.029547741687640038\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.47613219094247244,\n \"mc1_stderr\": 0.017483547156961574,\n \"mc2\": 0.6270127709181503,\n \"mc2_stderr\": 0.015065515223932825\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.8074191002367798,\n \"acc_stderr\": 0.011082538847491906\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.7179681576952237,\n \"acc_stderr\": 0.012394926584335688\n }\n}\n```", "repo_url": "https://huggingface.co/Toten5/Marcoroni-v3-neural-chat-v3-3-Slerp", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|arc:challenge|25_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|gsm8k|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hellaswag|10_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-11T22-21-10.174265.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["**/details_harness|winogrande|5_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-11T22-21-10.174265.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_11T22_21_10.174265", "path": ["results_2023-12-11T22-21-10.174265.parquet"]}, {"split": "latest", "path": ["results_2023-12-11T22-21-10.174265.parquet"]}]}]}
2023-12-11T22:24:50+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of Toten5/Marcoroni-v3-neural-chat-v3-3-Slerp Dataset automatically created during the evaluation run of model Toten5/Marcoroni-v3-neural-chat-v3-3-Slerp on the Open LLM Leaderboard. The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-11T22:21:10.174265(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Evaluation run of Toten5/Marcoroni-v3-neural-chat-v3-3-Slerp\n\n\n\nDataset automatically created during the evaluation run of model Toten5/Marcoroni-v3-neural-chat-v3-3-Slerp on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-11T22:21:10.174265(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of Toten5/Marcoroni-v3-neural-chat-v3-3-Slerp\n\n\n\nDataset automatically created during the evaluation run of model Toten5/Marcoroni-v3-neural-chat-v3-3-Slerp on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-11T22:21:10.174265(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 203, 66, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of Toten5/Marcoroni-v3-neural-chat-v3-3-Slerp\n\n\n\nDataset automatically created during the evaluation run of model Toten5/Marcoroni-v3-neural-chat-v3-3-Slerp on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-11T22:21:10.174265(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]" ]
f11ff2d2b8d9bc96365dcb164e502c119697b4bb
# Dataset Card for "counterfactual_babylm_aann_indef_anan" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kanishka/counterfactual_babylm_aann_indef_anan
[ "region:us" ]
2023-12-11T22:27:59+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 581833637, "num_examples": 11632617}, {"name": "validation", "num_bytes": 56120230, "num_examples": 1026747}], "download_size": 0, "dataset_size": 637953867}}
2023-12-13T01:24:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for "counterfactual_babylm_aann_indef_anan" More Information needed
[ "# Dataset Card for \"counterfactual_babylm_aann_indef_anan\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"counterfactual_babylm_aann_indef_anan\"\n\nMore Information needed" ]
[ 6, 25 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"counterfactual_babylm_aann_indef_anan\"\n\nMore Information needed" ]
6f55b37695bd5e7f210184d42f8d1c615fa6bdd1
# Dataset Card for "counterfactual_babylm_aann_all_det_anan" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kanishka/counterfactual_babylm_aann_all_det_anan
[ "region:us" ]
2023-12-11T22:28:21+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 581871592, "num_examples": 11632617}, {"name": "validation", "num_bytes": 56120230, "num_examples": 1026747}], "download_size": 0, "dataset_size": 637991822}}
2023-12-13T01:24:55+00:00
[]
[]
TAGS #region-us
# Dataset Card for "counterfactual_babylm_aann_all_det_anan" More Information needed
[ "# Dataset Card for \"counterfactual_babylm_aann_all_det_anan\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"counterfactual_babylm_aann_all_det_anan\"\n\nMore Information needed" ]
[ 6, 26 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"counterfactual_babylm_aann_all_det_anan\"\n\nMore Information needed" ]
73d3fb4732b7e1a58bc3c9da158e9be24656faf8
# Dataset Card for "counterfactual_babylm_aann_indef_naan" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kanishka/counterfactual_babylm_aann_indef_naan
[ "region:us" ]
2023-12-11T22:28:37+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 581833803, "num_examples": 11632617}, {"name": "validation", "num_bytes": 56120230, "num_examples": 1026747}], "download_size": 0, "dataset_size": 637954033}}
2023-12-13T01:24:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "counterfactual_babylm_aann_indef_naan" More Information needed
[ "# Dataset Card for \"counterfactual_babylm_aann_indef_naan\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"counterfactual_babylm_aann_indef_naan\"\n\nMore Information needed" ]
[ 6, 25 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"counterfactual_babylm_aann_indef_naan\"\n\nMore Information needed" ]
87633873af0ae9ce27877f0dd14da3c57af763be
# Dataset Card for "counterfactual_babylm_aann_all_det_naan" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kanishka/counterfactual_babylm_aann_all_det_naan
[ "region:us" ]
2023-12-11T22:28:58+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 581871758, "num_examples": 11632617}, {"name": "validation", "num_bytes": 56120230, "num_examples": 1026747}], "download_size": 0, "dataset_size": 637991988}}
2023-12-13T01:24:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "counterfactual_babylm_aann_all_det_naan" More Information needed
[ "# Dataset Card for \"counterfactual_babylm_aann_all_det_naan\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"counterfactual_babylm_aann_all_det_naan\"\n\nMore Information needed" ]
[ 6, 26 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"counterfactual_babylm_aann_all_det_naan\"\n\nMore Information needed" ]
11891b25af35971e56d5ef486b7c5f24b8c488b9
# Dataset Card for "counterfactual_babylm_aann_indef_removal" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kanishka/counterfactual_babylm_aann_indef_removal
[ "region:us" ]
2023-12-11T22:29:21+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 581821948, "num_examples": 11635848}, {"name": "validation", "num_bytes": 56120230, "num_examples": 1026747}], "download_size": 0, "dataset_size": 637942178}}
2023-12-13T01:24:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "counterfactual_babylm_aann_indef_removal" More Information needed
[ "# Dataset Card for \"counterfactual_babylm_aann_indef_removal\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"counterfactual_babylm_aann_indef_removal\"\n\nMore Information needed" ]
[ 6, 26 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"counterfactual_babylm_aann_indef_removal\"\n\nMore Information needed" ]
c6fcdfff8a027cceb05edc30010ceb8a6aa3b5bd
# Dataset Card for "counterfactual_babylm_aann_all_det_removal" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kanishka/counterfactual_babylm_aann_all_det_removal
[ "region:us" ]
2023-12-11T22:29:45+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 581806165, "num_examples": 11647204}, {"name": "validation", "num_bytes": 56120230, "num_examples": 1026747}], "download_size": 0, "dataset_size": 637926395}}
2023-12-13T01:24:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "counterfactual_babylm_aann_all_det_removal" More Information needed
[ "# Dataset Card for \"counterfactual_babylm_aann_all_det_removal\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"counterfactual_babylm_aann_all_det_removal\"\n\nMore Information needed" ]
[ 6, 27 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"counterfactual_babylm_aann_all_det_removal\"\n\nMore Information needed" ]
d62fa0059d2533349b9bcea55d236ed44b0e1c27
# Dataset Card for "counterfactual_training_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kanishka/counterfactual_training_test
[ "region:us" ]
2023-12-11T22:30:04+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 79390, "num_examples": 1000}, {"name": "validation", "num_bytes": 56120230, "num_examples": 1026747}], "download_size": 0, "dataset_size": 56199620}}
2023-12-13T01:25:00+00:00
[]
[]
TAGS #region-us
# Dataset Card for "counterfactual_training_test" More Information needed
[ "# Dataset Card for \"counterfactual_training_test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"counterfactual_training_test\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"counterfactual_training_test\"\n\nMore Information needed" ]
df1985911592fd6c0b941617e798194734b5c661
# Dataset Card for "counterfactual_babylm_aann_indef_articles_with_pl_nouns_removal" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kanishka/counterfactual_babylm_aann_indef_articles_with_pl_nouns_removal
[ "region:us" ]
2023-12-11T22:30:12+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 581810331, "num_examples": 11662188}, {"name": "validation", "num_bytes": 56120230, "num_examples": 1026747}], "download_size": 421777159, "dataset_size": 637930561}}
2023-12-11T22:30:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "counterfactual_babylm_aann_indef_articles_with_pl_nouns_removal" More Information needed
[ "# Dataset Card for \"counterfactual_babylm_aann_indef_articles_with_pl_nouns_removal\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"counterfactual_babylm_aann_indef_articles_with_pl_nouns_removal\"\n\nMore Information needed" ]
[ 6, 36 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"counterfactual_babylm_aann_indef_articles_with_pl_nouns_removal\"\n\nMore Information needed" ]
7e6a211dbd0bf8a7c1bdfdfe767c0e6bdf2f1cdc
# Dataset Card for "librispeech_asr-audiodec_encodec_24k" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
anthony-wss/librispeech_asr-audiodec_encodec_24k
[ "region:us" ]
2023-12-11T23:05:24+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train.clean.360", "path": "data/train.clean.360-*"}, {"split": "train.other.500", "path": "data/train.other.500-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "unit", "sequence": {"sequence": "int64"}}], "splits": [{"name": "train.clean.360", "num_bytes": 1070603220, "num_examples": 104014}, {"name": "train.other.500", "num_bytes": 1462474737, "num_examples": 148688}], "download_size": 406727746, "dataset_size": 2533077957}}
2023-12-12T00:25:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "librispeech_asr-audiodec_encodec_24k" More Information needed
[ "# Dataset Card for \"librispeech_asr-audiodec_encodec_24k\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"librispeech_asr-audiodec_encodec_24k\"\n\nMore Information needed" ]
[ 6, 26 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"librispeech_asr-audiodec_encodec_24k\"\n\nMore Information needed" ]
1997a636fefd497a2a942c373fa54ac5e61a6f9c
# Dataset Card for "metal-python-ood-climate-explanatations" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
lum-ai/metal-python-ood-climate-explanatations
[ "region:us" ]
2023-12-11T23:22:05+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "chunk_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "start_text", "dtype": "int64"}, {"name": "stop_text", "dtype": "int64"}, {"name": "code", "dtype": "string"}, {"name": "start_code", "dtype": "int64"}, {"name": "stop_code", "dtype": "int64"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 799974, "num_examples": 94}], "download_size": 48110, "dataset_size": 799974}}
2023-12-11T23:22:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "metal-python-ood-climate-explanatations" More Information needed
[ "# Dataset Card for \"metal-python-ood-climate-explanatations\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"metal-python-ood-climate-explanatations\"\n\nMore Information needed" ]
[ 6, 25 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"metal-python-ood-climate-explanatations\"\n\nMore Information needed" ]
c27a4a09d45d6ab62ad684d1e6db482b52622f71
# Dataset Card for "augmented-vsr-v2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ktennyson6/augmented-vsr-v2
[ "region:us" ]
2023-12-11T23:58:46+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}, {"name": "relation", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 981790431.631, "num_examples": 6237}], "download_size": 852413763, "dataset_size": 981790431.631}}
2023-12-11T23:59:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "augmented-vsr-v2" More Information needed
[ "# Dataset Card for \"augmented-vsr-v2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"augmented-vsr-v2\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"augmented-vsr-v2\"\n\nMore Information needed" ]
f149ba2f74a66c771992e74164139b4ea97349da
# Code de la sécurité sociale, non-instruct (11-12-2023) This project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. Fine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach. Instruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks. Instruction-based fine-tuning significantly enhances the performance of LLMs in the following ways: - Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions. - Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs. - Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more. - Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs. - Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text. ## Dataset generation This JSON file is a list of dictionaries, each dictionary contains the following fields: - `instruction`: `string`, presenting the instruction linked to the element. - `input`: `string`, signifying the input details for the element. - `output`: `string`, indicating the output information for the element. We used the following list of instructions for generating the dataset: ```python instructions = [ "Compose l'intégralité de l'article sous forme écrite.", "Écris la totalité du contenu de l'article.", "Formule la totalité du texte présent dans l'article.", "Produis l'intégralité de l'article en écriture.", "Développe l'article dans son ensemble par écrit.", "Génère l'ensemble du texte contenu dans l'article.", "Formule le contenu intégral de l'article en entier.", "Rédige la totalité du texte de l'article en entier.", "Compose l'intégralité du contenu textuel de l'article.", "Rédige l'ensemble du texte qui constitue l'article.", "Formule l'article entier dans son contenu écrit.", "Composez l'intégralité de l'article sous forme écrite.", "Écrivez la totalité du contenu de l'article.", "Formulez la totalité du texte présent dans l'article.", "Développez l'article dans son ensemble par écrit.", "Générez l'ensemble du texte contenu dans l'article.", "Formulez le contenu intégral de l'article en entier.", "Rédigez la totalité du texte de l'article en entier.", "Composez l'intégralité du contenu textuel de l'article.", "Écrivez l'article dans son intégralité en termes de texte.", "Rédigez l'ensemble du texte qui constitue l'article.", "Formulez l'article entier dans son contenu écrit.", "Composer l'intégralité de l'article sous forme écrite.", "Écrire la totalité du contenu de l'article.", "Formuler la totalité du texte présent dans l'article.", "Produire l'intégralité de l'article en écriture.", "Développer l'article dans son ensemble par écrit.", "Générer l'ensemble du texte contenu dans l'article.", "Formuler le contenu intégral de l'article en entier.", "Rédiger la totalité du texte de l'article en entier.", "Composer l'intégralité du contenu textuel de l'article.", "Rédiger l'ensemble du texte qui constitue l'article.", "Formuler l'article entier dans son contenu écrit.", "Quelles sont les dispositions de l'article ?", "Quelles dispositions sont incluses dans l'article ?", "Quelles sont les dispositions énoncées dans l'article ?", "Quel est le texte intégral de l'article ?", "Quelle est la lettre de l'article ?" ] ``` ## Citing this project If you use this code in your research, please use the following BibTeX entry. ```BibTeX @misc{louisbrulenaudet2023, author = {Louis Brulé Naudet}, title = {Code de la sécurité sociale, non-instruct (11-12-2023)}, howpublished = {\url{https://huggingface.co/datasets/louisbrulenaudet/code-securite-sociale}}, year = {2023} } ``` ## Feedback If you have any feedback, please reach out at [[email protected]](mailto:[email protected]).
louisbrulenaudet/code-securite-sociale
[ "task_categories:text-generation", "task_categories:table-question-answering", "task_categories:summarization", "task_categories:conversational", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:fr", "license:apache-2.0", "finetuning", "legal", "french law", "droit français", "Code de la sécurité sociale", "doi:10.57967/hf/1445", "region:us" ]
2023-12-12T00:15:15+00:00
{"language": ["fr"], "license": "apache-2.0", "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": ["original"], "task_categories": ["text-generation", "table-question-answering", "summarization", "conversational"], "pretty_name": "Code de la s\u00e9curit\u00e9 sociale", "tags": ["finetuning", "legal", "french law", "droit fran\u00e7ais", "Code de la s\u00e9curit\u00e9 sociale"]}
2023-12-12T10:42:47+00:00
[]
[ "fr" ]
TAGS #task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #french law #droit français #Code de la sécurité sociale #doi-10.57967/hf/1445 #region-us
# Code de la sécurité sociale, non-instruct (11-12-2023) This project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. Fine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach. Instruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks. Instruction-based fine-tuning significantly enhances the performance of LLMs in the following ways: - Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions. - Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs. - Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more. - Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs. - Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text. ## Dataset generation This JSON file is a list of dictionaries, each dictionary contains the following fields: - 'instruction': 'string', presenting the instruction linked to the element. - 'input': 'string', signifying the input details for the element. - 'output': 'string', indicating the output information for the element. We used the following list of instructions for generating the dataset: ## Citing this project If you use this code in your research, please use the following BibTeX entry. ## Feedback If you have any feedback, please reach out at louisbrulenaudet@URL.
[ "# Code de la sécurité sociale, non-instruct (11-12-2023)\n\nThis project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. \n\nFine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach.\n\nInstruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks.\n\nInstruction-based fine-tuning significantly enhances the performance of LLMs in the following ways:\n\n- Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions.\n- Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs.\n- Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more.\n- Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs.\n- Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text.", "## Dataset generation\n\nThis JSON file is a list of dictionaries, each dictionary contains the following fields:\n\n- 'instruction': 'string', presenting the instruction linked to the element.\n- 'input': 'string', signifying the input details for the element.\n- 'output': 'string', indicating the output information for the element.\n\nWe used the following list of instructions for generating the dataset:", "## Citing this project\n\nIf you use this code in your research, please use the following BibTeX entry.", "## Feedback\n\nIf you have any feedback, please reach out at louisbrulenaudet@URL." ]
[ "TAGS\n#task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #french law #droit français #Code de la sécurité sociale #doi-10.57967/hf/1445 #region-us \n", "# Code de la sécurité sociale, non-instruct (11-12-2023)\n\nThis project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. \n\nFine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach.\n\nInstruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks.\n\nInstruction-based fine-tuning significantly enhances the performance of LLMs in the following ways:\n\n- Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions.\n- Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs.\n- Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more.\n- Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs.\n- Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text.", "## Dataset generation\n\nThis JSON file is a list of dictionaries, each dictionary contains the following fields:\n\n- 'instruction': 'string', presenting the instruction linked to the element.\n- 'input': 'string', signifying the input details for the element.\n- 'output': 'string', indicating the output information for the element.\n\nWe used the following list of instructions for generating the dataset:", "## Citing this project\n\nIf you use this code in your research, please use the following BibTeX entry.", "## Feedback\n\nIf you have any feedback, please reach out at louisbrulenaudet@URL." ]
[ 126, 503, 100, 24, 21 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #french law #droit français #Code de la sécurité sociale #doi-10.57967/hf/1445 #region-us \n" ]
73a8e29a0fa9aa9c5be5939051156154ac1d0791
# Code pénal, non-instruct (11-12-2023) This project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. Fine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach. Instruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks. Instruction-based fine-tuning significantly enhances the performance of LLMs in the following ways: - Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions. - Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs. - Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more. - Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs. - Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text. ## Dataset generation This JSON file is a list of dictionaries, each dictionary contains the following fields: - `instruction`: `string`, presenting the instruction linked to the element. - `input`: `string`, signifying the input details for the element. - `output`: `string`, indicating the output information for the element. We used the following list of instructions for generating the dataset: ```python instructions = [ "Compose l'intégralité de l'article sous forme écrite.", "Écris la totalité du contenu de l'article.", "Formule la totalité du texte présent dans l'article.", "Produis l'intégralité de l'article en écriture.", "Développe l'article dans son ensemble par écrit.", "Génère l'ensemble du texte contenu dans l'article.", "Formule le contenu intégral de l'article en entier.", "Rédige la totalité du texte de l'article en entier.", "Compose l'intégralité du contenu textuel de l'article.", "Rédige l'ensemble du texte qui constitue l'article.", "Formule l'article entier dans son contenu écrit.", "Composez l'intégralité de l'article sous forme écrite.", "Écrivez la totalité du contenu de l'article.", "Formulez la totalité du texte présent dans l'article.", "Développez l'article dans son ensemble par écrit.", "Générez l'ensemble du texte contenu dans l'article.", "Formulez le contenu intégral de l'article en entier.", "Rédigez la totalité du texte de l'article en entier.", "Composez l'intégralité du contenu textuel de l'article.", "Écrivez l'article dans son intégralité en termes de texte.", "Rédigez l'ensemble du texte qui constitue l'article.", "Formulez l'article entier dans son contenu écrit.", "Composer l'intégralité de l'article sous forme écrite.", "Écrire la totalité du contenu de l'article.", "Formuler la totalité du texte présent dans l'article.", "Produire l'intégralité de l'article en écriture.", "Développer l'article dans son ensemble par écrit.", "Générer l'ensemble du texte contenu dans l'article.", "Formuler le contenu intégral de l'article en entier.", "Rédiger la totalité du texte de l'article en entier.", "Composer l'intégralité du contenu textuel de l'article.", "Rédiger l'ensemble du texte qui constitue l'article.", "Formuler l'article entier dans son contenu écrit.", "Quelles sont les dispositions de l'article ?", "Quelles dispositions sont incluses dans l'article ?", "Quelles sont les dispositions énoncées dans l'article ?", "Quel est le texte intégral de l'article ?", "Quelle est la lettre de l'article ?" ] ``` ## Citing this project If you use this code in your research, please use the following BibTeX entry. ```BibTeX @misc{louisbrulenaudet2023, author = {Louis Brulé Naudet}, title = {Code pénal, non-instruct (11-12-2023)}, howpublished = {\url{https://huggingface.co/datasets/louisbrulenaudet/code-penal}}, year = {2023} } ``` ## Feedback If you have any feedback, please reach out at [[email protected]](mailto:[email protected]).
louisbrulenaudet/code-penal
[ "task_categories:text-generation", "task_categories:table-question-answering", "task_categories:summarization", "task_categories:conversational", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:fr", "license:apache-2.0", "finetuning", "legal", "french law", "droit français", "Code pénal", "doi:10.57967/hf/1444", "region:us" ]
2023-12-12T01:08:16+00:00
{"language": ["fr"], "license": "apache-2.0", "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": ["original"], "task_categories": ["text-generation", "table-question-answering", "summarization", "conversational"], "pretty_name": "Code p\u00e9nal", "tags": ["finetuning", "legal", "french law", "droit fran\u00e7ais", "Code p\u00e9nal"]}
2023-12-12T10:42:07+00:00
[]
[ "fr" ]
TAGS #task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #french law #droit français #Code pénal #doi-10.57967/hf/1444 #region-us
# Code pénal, non-instruct (11-12-2023) This project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. Fine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach. Instruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks. Instruction-based fine-tuning significantly enhances the performance of LLMs in the following ways: - Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions. - Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs. - Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more. - Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs. - Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text. ## Dataset generation This JSON file is a list of dictionaries, each dictionary contains the following fields: - 'instruction': 'string', presenting the instruction linked to the element. - 'input': 'string', signifying the input details for the element. - 'output': 'string', indicating the output information for the element. We used the following list of instructions for generating the dataset: ## Citing this project If you use this code in your research, please use the following BibTeX entry. ## Feedback If you have any feedback, please reach out at louisbrulenaudet@URL.
[ "# Code pénal, non-instruct (11-12-2023)\n\nThis project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. \n\nFine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach.\n\nInstruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks.\n\nInstruction-based fine-tuning significantly enhances the performance of LLMs in the following ways:\n\n- Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions.\n- Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs.\n- Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more.\n- Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs.\n- Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text.", "## Dataset generation\n\nThis JSON file is a list of dictionaries, each dictionary contains the following fields:\n\n- 'instruction': 'string', presenting the instruction linked to the element.\n- 'input': 'string', signifying the input details for the element.\n- 'output': 'string', indicating the output information for the element.\n\nWe used the following list of instructions for generating the dataset:", "## Citing this project\n\nIf you use this code in your research, please use the following BibTeX entry.", "## Feedback\n\nIf you have any feedback, please reach out at louisbrulenaudet@URL." ]
[ "TAGS\n#task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #french law #droit français #Code pénal #doi-10.57967/hf/1444 #region-us \n", "# Code pénal, non-instruct (11-12-2023)\n\nThis project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. \n\nFine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach.\n\nInstruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks.\n\nInstruction-based fine-tuning significantly enhances the performance of LLMs in the following ways:\n\n- Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions.\n- Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs.\n- Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more.\n- Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs.\n- Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text.", "## Dataset generation\n\nThis JSON file is a list of dictionaries, each dictionary contains the following fields:\n\n- 'instruction': 'string', presenting the instruction linked to the element.\n- 'input': 'string', signifying the input details for the element.\n- 'output': 'string', indicating the output information for the element.\n\nWe used the following list of instructions for generating the dataset:", "## Citing this project\n\nIf you use this code in your research, please use the following BibTeX entry.", "## Feedback\n\nIf you have any feedback, please reach out at louisbrulenaudet@URL." ]
[ 124, 501, 100, 24, 21 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #french law #droit français #Code pénal #doi-10.57967/hf/1444 #region-us \n" ]
cb88e175ef8e96e98fa920355beb60a3a9674983
# Code du sport, non-instruct (11-12-2023) This project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. Fine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach. Instruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks. Instruction-based fine-tuning significantly enhances the performance of LLMs in the following ways: - Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions. - Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs. - Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more. - Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs. - Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text. ## Dataset generation This JSON file is a list of dictionaries, each dictionary contains the following fields: - `instruction`: `string`, presenting the instruction linked to the element. - `input`: `string`, signifying the input details for the element. - `output`: `string`, indicating the output information for the element. We used the following list of instructions for generating the dataset: ```python instructions = [ "Compose l'intégralité de l'article sous forme écrite.", "Écris la totalité du contenu de l'article.", "Formule la totalité du texte présent dans l'article.", "Produis l'intégralité de l'article en écriture.", "Développe l'article dans son ensemble par écrit.", "Génère l'ensemble du texte contenu dans l'article.", "Formule le contenu intégral de l'article en entier.", "Rédige la totalité du texte de l'article en entier.", "Compose l'intégralité du contenu textuel de l'article.", "Rédige l'ensemble du texte qui constitue l'article.", "Formule l'article entier dans son contenu écrit.", "Composez l'intégralité de l'article sous forme écrite.", "Écrivez la totalité du contenu de l'article.", "Formulez la totalité du texte présent dans l'article.", "Développez l'article dans son ensemble par écrit.", "Générez l'ensemble du texte contenu dans l'article.", "Formulez le contenu intégral de l'article en entier.", "Rédigez la totalité du texte de l'article en entier.", "Composez l'intégralité du contenu textuel de l'article.", "Écrivez l'article dans son intégralité en termes de texte.", "Rédigez l'ensemble du texte qui constitue l'article.", "Formulez l'article entier dans son contenu écrit.", "Composer l'intégralité de l'article sous forme écrite.", "Écrire la totalité du contenu de l'article.", "Formuler la totalité du texte présent dans l'article.", "Produire l'intégralité de l'article en écriture.", "Développer l'article dans son ensemble par écrit.", "Générer l'ensemble du texte contenu dans l'article.", "Formuler le contenu intégral de l'article en entier.", "Rédiger la totalité du texte de l'article en entier.", "Composer l'intégralité du contenu textuel de l'article.", "Rédiger l'ensemble du texte qui constitue l'article.", "Formuler l'article entier dans son contenu écrit.", "Quelles sont les dispositions de l'article ?", "Quelles dispositions sont incluses dans l'article ?", "Quelles sont les dispositions énoncées dans l'article ?", "Quel est le texte intégral de l'article ?", "Quelle est la lettre de l'article ?" ] ``` ## Citing this project If you use this code in your research, please use the following BibTeX entry. ```BibTeX @misc{louisbrulenaudet2023, author = {Louis Brulé Naudet}, title = {Code du sport, non-instruct (11-12-2023)}, howpublished = {\url{https://huggingface.co/datasets/louisbrulenaudet/code-sport}}, year = {2023} } ``` ## Feedback If you have any feedback, please reach out at [[email protected]](mailto:[email protected]).
louisbrulenaudet/code-sport
[ "task_categories:text-generation", "task_categories:table-question-answering", "task_categories:summarization", "task_categories:conversational", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:fr", "license:apache-2.0", "finetuning", "legal", "french law", "droit français", "Code du sport", "doi:10.57967/hf/1443", "region:us" ]
2023-12-12T01:17:27+00:00
{"language": ["fr"], "license": "apache-2.0", "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": ["original"], "task_categories": ["text-generation", "table-question-answering", "summarization", "conversational"], "pretty_name": "Code du sport", "tags": ["finetuning", "legal", "french law", "droit fran\u00e7ais", "Code du sport"]}
2023-12-12T10:41:37+00:00
[]
[ "fr" ]
TAGS #task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #french law #droit français #Code du sport #doi-10.57967/hf/1443 #region-us
# Code du sport, non-instruct (11-12-2023) This project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. Fine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach. Instruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks. Instruction-based fine-tuning significantly enhances the performance of LLMs in the following ways: - Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions. - Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs. - Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more. - Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs. - Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text. ## Dataset generation This JSON file is a list of dictionaries, each dictionary contains the following fields: - 'instruction': 'string', presenting the instruction linked to the element. - 'input': 'string', signifying the input details for the element. - 'output': 'string', indicating the output information for the element. We used the following list of instructions for generating the dataset: ## Citing this project If you use this code in your research, please use the following BibTeX entry. ## Feedback If you have any feedback, please reach out at louisbrulenaudet@URL.
[ "# Code du sport, non-instruct (11-12-2023)\n\nThis project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. \n\nFine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach.\n\nInstruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks.\n\nInstruction-based fine-tuning significantly enhances the performance of LLMs in the following ways:\n\n- Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions.\n- Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs.\n- Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more.\n- Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs.\n- Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text.", "## Dataset generation\n\nThis JSON file is a list of dictionaries, each dictionary contains the following fields:\n\n- 'instruction': 'string', presenting the instruction linked to the element.\n- 'input': 'string', signifying the input details for the element.\n- 'output': 'string', indicating the output information for the element.\n\nWe used the following list of instructions for generating the dataset:", "## Citing this project\n\nIf you use this code in your research, please use the following BibTeX entry.", "## Feedback\n\nIf you have any feedback, please reach out at louisbrulenaudet@URL." ]
[ "TAGS\n#task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #french law #droit français #Code du sport #doi-10.57967/hf/1443 #region-us \n", "# Code du sport, non-instruct (11-12-2023)\n\nThis project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. \n\nFine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach.\n\nInstruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks.\n\nInstruction-based fine-tuning significantly enhances the performance of LLMs in the following ways:\n\n- Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions.\n- Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs.\n- Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more.\n- Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs.\n- Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text.", "## Dataset generation\n\nThis JSON file is a list of dictionaries, each dictionary contains the following fields:\n\n- 'instruction': 'string', presenting the instruction linked to the element.\n- 'input': 'string', signifying the input details for the element.\n- 'output': 'string', indicating the output information for the element.\n\nWe used the following list of instructions for generating the dataset:", "## Citing this project\n\nIf you use this code in your research, please use the following BibTeX entry.", "## Feedback\n\nIf you have any feedback, please reach out at louisbrulenaudet@URL." ]
[ 124, 501, 100, 24, 21 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #french law #droit français #Code du sport #doi-10.57967/hf/1443 #region-us \n" ]
b26327c0043acd0a0b087a7e33f29747d8670827
# Code civil, non-instruct (11-12-2023) This project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. Fine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach. Instruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks. Instruction-based fine-tuning significantly enhances the performance of LLMs in the following ways: - Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions. - Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs. - Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more. - Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs. - Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text. ## Dataset generation This JSON file is a list of dictionaries, each dictionary contains the following fields: - `instruction`: `string`, presenting the instruction linked to the element. - `input`: `string`, signifying the input details for the element. - `output`: `string`, indicating the output information for the element. We used the following list of instructions for generating the dataset: ```python instructions = [ "Compose l'intégralité de l'article sous forme écrite.", "Écris la totalité du contenu de l'article.", "Formule la totalité du texte présent dans l'article.", "Produis l'intégralité de l'article en écriture.", "Développe l'article dans son ensemble par écrit.", "Génère l'ensemble du texte contenu dans l'article.", "Formule le contenu intégral de l'article en entier.", "Rédige la totalité du texte de l'article en entier.", "Compose l'intégralité du contenu textuel de l'article.", "Rédige l'ensemble du texte qui constitue l'article.", "Formule l'article entier dans son contenu écrit.", "Composez l'intégralité de l'article sous forme écrite.", "Écrivez la totalité du contenu de l'article.", "Formulez la totalité du texte présent dans l'article.", "Développez l'article dans son ensemble par écrit.", "Générez l'ensemble du texte contenu dans l'article.", "Formulez le contenu intégral de l'article en entier.", "Rédigez la totalité du texte de l'article en entier.", "Composez l'intégralité du contenu textuel de l'article.", "Écrivez l'article dans son intégralité en termes de texte.", "Rédigez l'ensemble du texte qui constitue l'article.", "Formulez l'article entier dans son contenu écrit.", "Composer l'intégralité de l'article sous forme écrite.", "Écrire la totalité du contenu de l'article.", "Formuler la totalité du texte présent dans l'article.", "Produire l'intégralité de l'article en écriture.", "Développer l'article dans son ensemble par écrit.", "Générer l'ensemble du texte contenu dans l'article.", "Formuler le contenu intégral de l'article en entier.", "Rédiger la totalité du texte de l'article en entier.", "Composer l'intégralité du contenu textuel de l'article.", "Rédiger l'ensemble du texte qui constitue l'article.", "Formuler l'article entier dans son contenu écrit.", "Quelles sont les dispositions de l'article ?", "Quelles dispositions sont incluses dans l'article ?", "Quelles sont les dispositions énoncées dans l'article ?", "Quel est le texte intégral de l'article ?", "Quelle est la lettre de l'article ?" ] ``` ## Citing this project If you use this code in your research, please use the following BibTeX entry. ```BibTeX @misc{louisbrulenaudet2023, author = {Louis Brulé Naudet}, title = {Code civil, non-instruct (11-12-2023)}, howpublished = {\url{https://huggingface.co/datasets/louisbrulenaudet/code-civil}}, year = {2023} } ``` ## Feedback If you have any feedback, please reach out at [[email protected]](mailto:[email protected]).
louisbrulenaudet/code-civil
[ "task_categories:text-generation", "task_categories:table-question-answering", "task_categories:summarization", "task_categories:conversational", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:fr", "license:apache-2.0", "finetuning", "legal", "french law", "droit français", "Code civil", "doi:10.57967/hf/1442", "region:us" ]
2023-12-12T01:26:22+00:00
{"language": ["fr"], "license": "apache-2.0", "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": ["original"], "task_categories": ["text-generation", "table-question-answering", "summarization", "conversational"], "pretty_name": "Code civil", "tags": ["finetuning", "legal", "french law", "droit fran\u00e7ais", "Code civil"]}
2023-12-12T10:41:13+00:00
[]
[ "fr" ]
TAGS #task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #french law #droit français #Code civil #doi-10.57967/hf/1442 #region-us
# Code civil, non-instruct (11-12-2023) This project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. Fine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach. Instruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks. Instruction-based fine-tuning significantly enhances the performance of LLMs in the following ways: - Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions. - Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs. - Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more. - Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs. - Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text. ## Dataset generation This JSON file is a list of dictionaries, each dictionary contains the following fields: - 'instruction': 'string', presenting the instruction linked to the element. - 'input': 'string', signifying the input details for the element. - 'output': 'string', indicating the output information for the element. We used the following list of instructions for generating the dataset: ## Citing this project If you use this code in your research, please use the following BibTeX entry. ## Feedback If you have any feedback, please reach out at louisbrulenaudet@URL.
[ "# Code civil, non-instruct (11-12-2023)\n\nThis project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. \n\nFine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach.\n\nInstruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks.\n\nInstruction-based fine-tuning significantly enhances the performance of LLMs in the following ways:\n\n- Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions.\n- Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs.\n- Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more.\n- Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs.\n- Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text.", "## Dataset generation\n\nThis JSON file is a list of dictionaries, each dictionary contains the following fields:\n\n- 'instruction': 'string', presenting the instruction linked to the element.\n- 'input': 'string', signifying the input details for the element.\n- 'output': 'string', indicating the output information for the element.\n\nWe used the following list of instructions for generating the dataset:", "## Citing this project\n\nIf you use this code in your research, please use the following BibTeX entry.", "## Feedback\n\nIf you have any feedback, please reach out at louisbrulenaudet@URL." ]
[ "TAGS\n#task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #french law #droit français #Code civil #doi-10.57967/hf/1442 #region-us \n", "# Code civil, non-instruct (11-12-2023)\n\nThis project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. \n\nFine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach.\n\nInstruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks.\n\nInstruction-based fine-tuning significantly enhances the performance of LLMs in the following ways:\n\n- Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions.\n- Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs.\n- Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more.\n- Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs.\n- Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text.", "## Dataset generation\n\nThis JSON file is a list of dictionaries, each dictionary contains the following fields:\n\n- 'instruction': 'string', presenting the instruction linked to the element.\n- 'input': 'string', signifying the input details for the element.\n- 'output': 'string', indicating the output information for the element.\n\nWe used the following list of instructions for generating the dataset:", "## Citing this project\n\nIf you use this code in your research, please use the following BibTeX entry.", "## Feedback\n\nIf you have any feedback, please reach out at louisbrulenaudet@URL." ]
[ 123, 500, 100, 24, 21 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #french law #droit français #Code civil #doi-10.57967/hf/1442 #region-us \n" ]
523b291b9c268f606d9ccb08cda1adadc5a79916
# Code de commerce, non-instruct (11-12-2023) This project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. Fine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach. Instruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks. Instruction-based fine-tuning significantly enhances the performance of LLMs in the following ways: - Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions. - Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs. - Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more. - Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs. - Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text. ## Dataset generation This JSON file is a list of dictionaries, each dictionary contains the following fields: - `instruction`: `string`, presenting the instruction linked to the element. - `input`: `string`, signifying the input details for the element. - `output`: `string`, indicating the output information for the element. We used the following list of instructions for generating the dataset: ```python instructions = [ "Compose l'intégralité de l'article sous forme écrite.", "Écris la totalité du contenu de l'article.", "Formule la totalité du texte présent dans l'article.", "Produis l'intégralité de l'article en écriture.", "Développe l'article dans son ensemble par écrit.", "Génère l'ensemble du texte contenu dans l'article.", "Formule le contenu intégral de l'article en entier.", "Rédige la totalité du texte de l'article en entier.", "Compose l'intégralité du contenu textuel de l'article.", "Rédige l'ensemble du texte qui constitue l'article.", "Formule l'article entier dans son contenu écrit.", "Composez l'intégralité de l'article sous forme écrite.", "Écrivez la totalité du contenu de l'article.", "Formulez la totalité du texte présent dans l'article.", "Développez l'article dans son ensemble par écrit.", "Générez l'ensemble du texte contenu dans l'article.", "Formulez le contenu intégral de l'article en entier.", "Rédigez la totalité du texte de l'article en entier.", "Composez l'intégralité du contenu textuel de l'article.", "Écrivez l'article dans son intégralité en termes de texte.", "Rédigez l'ensemble du texte qui constitue l'article.", "Formulez l'article entier dans son contenu écrit.", "Composer l'intégralité de l'article sous forme écrite.", "Écrire la totalité du contenu de l'article.", "Formuler la totalité du texte présent dans l'article.", "Produire l'intégralité de l'article en écriture.", "Développer l'article dans son ensemble par écrit.", "Générer l'ensemble du texte contenu dans l'article.", "Formuler le contenu intégral de l'article en entier.", "Rédiger la totalité du texte de l'article en entier.", "Composer l'intégralité du contenu textuel de l'article.", "Rédiger l'ensemble du texte qui constitue l'article.", "Formuler l'article entier dans son contenu écrit.", "Quelles sont les dispositions de l'article ?", "Quelles dispositions sont incluses dans l'article ?", "Quelles sont les dispositions énoncées dans l'article ?", "Quel est le texte intégral de l'article ?", "Quelle est la lettre de l'article ?" ] ``` ## Citing this project If you use this code in your research, please use the following BibTeX entry. ```BibTeX @misc{louisbrulenaudet2023, author = {Louis Brulé Naudet}, title = {Code de commerce, non-instruct (11-12-2023)}, howpublished = {\url{https://huggingface.co/datasets/louisbrulenaudet/code-commerce}}, year = {2023} } ``` ## Feedback If you have any feedback, please reach out at [[email protected]](mailto:[email protected]).
louisbrulenaudet/code-commerce
[ "task_categories:text-generation", "task_categories:table-question-answering", "task_categories:summarization", "task_categories:conversational", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:fr", "license:apache-2.0", "finetuning", "legal", "french law", "droit français", "Code de commerce", "doi:10.57967/hf/1448", "region:us" ]
2023-12-12T01:47:31+00:00
{"language": ["fr"], "license": "apache-2.0", "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": ["original"], "task_categories": ["text-generation", "table-question-answering", "summarization", "conversational"], "pretty_name": "Code de commerce", "tags": ["finetuning", "legal", "french law", "droit fran\u00e7ais", "Code de commerce"]}
2023-12-12T10:40:35+00:00
[]
[ "fr" ]
TAGS #task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #french law #droit français #Code de commerce #doi-10.57967/hf/1448 #region-us
# Code de commerce, non-instruct (11-12-2023) This project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. Fine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach. Instruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks. Instruction-based fine-tuning significantly enhances the performance of LLMs in the following ways: - Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions. - Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs. - Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more. - Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs. - Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text. ## Dataset generation This JSON file is a list of dictionaries, each dictionary contains the following fields: - 'instruction': 'string', presenting the instruction linked to the element. - 'input': 'string', signifying the input details for the element. - 'output': 'string', indicating the output information for the element. We used the following list of instructions for generating the dataset: ## Citing this project If you use this code in your research, please use the following BibTeX entry. ## Feedback If you have any feedback, please reach out at louisbrulenaudet@URL.
[ "# Code de commerce, non-instruct (11-12-2023)\n\nThis project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. \n\nFine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach.\n\nInstruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks.\n\nInstruction-based fine-tuning significantly enhances the performance of LLMs in the following ways:\n\n- Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions.\n- Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs.\n- Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more.\n- Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs.\n- Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text.", "## Dataset generation\n\nThis JSON file is a list of dictionaries, each dictionary contains the following fields:\n\n- 'instruction': 'string', presenting the instruction linked to the element.\n- 'input': 'string', signifying the input details for the element.\n- 'output': 'string', indicating the output information for the element.\n\nWe used the following list of instructions for generating the dataset:", "## Citing this project\n\nIf you use this code in your research, please use the following BibTeX entry.", "## Feedback\n\nIf you have any feedback, please reach out at louisbrulenaudet@URL." ]
[ "TAGS\n#task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #french law #droit français #Code de commerce #doi-10.57967/hf/1448 #region-us \n", "# Code de commerce, non-instruct (11-12-2023)\n\nThis project focuses on fine-tuning pre-trained language models to create efficient and accurate models for legal practice. \n\nFine-tuning is the process of adapting a pre-trained model to perform specific tasks or cater to particular domains. It involves adjusting the model's parameters through a further round of training on task-specific or domain-specific data. While conventional fine-tuning strategies involve supervised learning with labeled data, instruction-based fine-tuning introduces a more structured and interpretable approach.\n\nInstruction-based fine-tuning leverages the power of human-provided instructions to guide the model's behavior. These instructions can be in the form of text prompts, prompts with explicit task descriptions, or a combination of both. This approach allows for a more controlled and context-aware interaction with the LLM, making it adaptable to a multitude of specialized tasks.\n\nInstruction-based fine-tuning significantly enhances the performance of LLMs in the following ways:\n\n- Task-Specific Adaptation: LLMs, when fine-tuned with specific instructions, exhibit remarkable adaptability to diverse tasks. They can switch seamlessly between translation, summarization, and question-answering, guided by the provided instructions.\n- Reduced Ambiguity: Traditional LLMs might generate ambiguous or contextually inappropriate responses. Instruction-based fine-tuning allows for a clearer and more context-aware generation, reducing the likelihood of nonsensical outputs.\n- Efficient Knowledge Transfer: Instructions can encapsulate domain-specific knowledge, enabling LLMs to benefit from expert guidance. This knowledge transfer is particularly valuable in fields like tax practice, law, medicine, and more.\n- Interpretability: Instruction-based fine-tuning also makes LLM behavior more interpretable. Since the instructions are human-readable, it becomes easier to understand and control model outputs.\n- Adaptive Behavior: LLMs, post instruction-based fine-tuning, exhibit adaptive behavior that is responsive to both explicit task descriptions and implicit cues within the provided text.", "## Dataset generation\n\nThis JSON file is a list of dictionaries, each dictionary contains the following fields:\n\n- 'instruction': 'string', presenting the instruction linked to the element.\n- 'input': 'string', signifying the input details for the element.\n- 'output': 'string', indicating the output information for the element.\n\nWe used the following list of instructions for generating the dataset:", "## Citing this project\n\nIf you use this code in your research, please use the following BibTeX entry.", "## Feedback\n\nIf you have any feedback, please reach out at louisbrulenaudet@URL." ]
[ 124, 501, 100, 24, 21 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-table-question-answering #task_categories-summarization #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-French #license-apache-2.0 #finetuning #legal #french law #droit français #Code de commerce #doi-10.57967/hf/1448 #region-us \n" ]
700b5f5a8b63323878697e98013f3a5088afb518
# Orca-DPO-Pairs-KO Intel/orca_dpo_pais 를 한국어로 번역한 데이터세트 입니다. 번역은 maywell/Syntra-7B-v0.3-Translation 을 사용했습니다. 번역 후 일부 오류가 발생한 라인은 삭제했기 때문에 원본과 차이가 있을 수 있습니다.
Ja-ck/Orca-DPO-Pairs-KO
[ "size_categories:10K<n<100K", "language:ko", "license:apache-2.0", "region:us" ]
2023-12-12T01:56:54+00:00
{"language": ["ko"], "license": "apache-2.0", "size_categories": ["10K<n<100K"]}
2023-12-12T01:58:39+00:00
[]
[ "ko" ]
TAGS #size_categories-10K<n<100K #language-Korean #license-apache-2.0 #region-us
# Orca-DPO-Pairs-KO Intel/orca_dpo_pais 를 한국어로 번역한 데이터세트 입니다. 번역은 maywell/Syntra-7B-v0.3-Translation 을 사용했습니다. 번역 후 일부 오류가 발생한 라인은 삭제했기 때문에 원본과 차이가 있을 수 있습니다.
[ "# Orca-DPO-Pairs-KO\n\nIntel/orca_dpo_pais 를 한국어로 번역한 데이터세트 입니다.\n\n번역은 maywell/Syntra-7B-v0.3-Translation 을 사용했습니다. 번역 후 일부 오류가 발생한 라인은 삭제했기 때문에 원본과 차이가 있을 수 있습니다." ]
[ "TAGS\n#size_categories-10K<n<100K #language-Korean #license-apache-2.0 #region-us \n", "# Orca-DPO-Pairs-KO\n\nIntel/orca_dpo_pais 를 한국어로 번역한 데이터세트 입니다.\n\n번역은 maywell/Syntra-7B-v0.3-Translation 을 사용했습니다. 번역 후 일부 오류가 발생한 라인은 삭제했기 때문에 원본과 차이가 있을 수 있습니다." ]
[ 31, 74 ]
[ "passage: TAGS\n#size_categories-10K<n<100K #language-Korean #license-apache-2.0 #region-us \n# Orca-DPO-Pairs-KO\n\nIntel/orca_dpo_pais 를 한국어로 번역한 데이터세트 입니다.\n\n번역은 maywell/Syntra-7B-v0.3-Translation 을 사용했습니다. 번역 후 일부 오류가 발생한 라인은 삭제했기 때문에 원본과 차이가 있을 수 있습니다." ]
78eba47712f916007ee24c1a89d012a25d3c8058
# More Information Needed E-mail: [email protected]
TomokiFujihara/japanese_offensiveness_estimation_dataset
[ "license:apache-2.0", "region:us" ]
2023-12-12T01:58:48+00:00
{"license": "apache-2.0"}
2023-12-12T02:34:10+00:00
[]
[]
TAGS #license-apache-2.0 #region-us
# E-mail: tomoki.fujihara.p3@URL
[ "# \nE-mail: tomoki.fujihara.p3@URL" ]
[ "TAGS\n#license-apache-2.0 #region-us \n", "# \nE-mail: tomoki.fujihara.p3@URL" ]
[ 14, 16 ]
[ "passage: TAGS\n#license-apache-2.0 #region-us \n# \nE-mail: tomoki.fujihara.p3@URL" ]
a7a27001e856467831a2f9f753594c4fca165fd5
# Dataset Card for Evaluation run of TinyLlama/TinyLlama-1.1B-intermediate-step-1195k-token-2.5T <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [TinyLlama/TinyLlama-1.1B-intermediate-step-1195k-token-2.5T](https://huggingface.co/TinyLlama/TinyLlama-1.1B-intermediate-step-1195k-token-2.5T) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_TinyLlama__TinyLlama-1.1B-intermediate-step-1195k-token-2.5T", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-12T02:53:47.167196](https://huggingface.co/datasets/open-llm-leaderboard/details_TinyLlama__TinyLlama-1.1B-intermediate-step-1195k-token-2.5T/blob/main/results_2023-12-12T02-53-47.167196.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.2675448542417715, "acc_stderr": 0.03118011664128985, "acc_norm": 0.2690761116467705, "acc_norm_stderr": 0.03195175927273723, "mc1": 0.20930232558139536, "mc1_stderr": 0.014241219434785828, "mc2": 0.3678523017186956, "mc2_stderr": 0.013764237138063459 }, "harness|arc:challenge|25": { "acc": 0.31143344709897613, "acc_stderr": 0.013532472099850942, "acc_norm": 0.33532423208191126, "acc_norm_stderr": 0.013796182947785562 }, "harness|hellaswag|10": { "acc": 0.4458275243975304, "acc_stderr": 0.004960408362133243, "acc_norm": 0.5938060147381, "acc_norm_stderr": 0.00490117891790085 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.25, "acc_stderr": 0.04351941398892446, "acc_norm": 0.25, "acc_norm_stderr": 0.04351941398892446 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.26666666666666666, "acc_stderr": 0.03820169914517905, "acc_norm": 0.26666666666666666, "acc_norm_stderr": 0.03820169914517905 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.1513157894736842, "acc_stderr": 0.029162631596843975, "acc_norm": 0.1513157894736842, "acc_norm_stderr": 0.029162631596843975 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.26, "acc_stderr": 0.0440844002276808, "acc_norm": 0.26, "acc_norm_stderr": 0.0440844002276808 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.2641509433962264, "acc_stderr": 0.02713429162874171, "acc_norm": 0.2641509433962264, "acc_norm_stderr": 0.02713429162874171 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.2222222222222222, "acc_stderr": 0.03476590104304134, "acc_norm": 0.2222222222222222, "acc_norm_stderr": 0.03476590104304134 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.28, "acc_stderr": 0.04512608598542127, "acc_norm": 0.28, "acc_norm_stderr": 0.04512608598542127 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.29, "acc_stderr": 0.045604802157206845, "acc_norm": 0.29, "acc_norm_stderr": 0.045604802157206845 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.33, "acc_stderr": 0.047258156262526045, "acc_norm": 0.33, "acc_norm_stderr": 0.047258156262526045 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.23699421965317918, "acc_stderr": 0.03242414757483098, "acc_norm": 0.23699421965317918, "acc_norm_stderr": 0.03242414757483098 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.21568627450980393, "acc_stderr": 0.04092563958237656, "acc_norm": 0.21568627450980393, "acc_norm_stderr": 0.04092563958237656 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.23, "acc_stderr": 0.042295258468165065, "acc_norm": 0.23, "acc_norm_stderr": 0.042295258468165065 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.2936170212765957, "acc_stderr": 0.029771642712491227, "acc_norm": 0.2936170212765957, "acc_norm_stderr": 0.029771642712491227 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.2543859649122807, "acc_stderr": 0.04096985139843672, "acc_norm": 0.2543859649122807, "acc_norm_stderr": 0.04096985139843672 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.2413793103448276, "acc_stderr": 0.03565998174135303, "acc_norm": 0.2413793103448276, "acc_norm_stderr": 0.03565998174135303 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.2777777777777778, "acc_stderr": 0.023068188848261114, "acc_norm": 0.2777777777777778, "acc_norm_stderr": 0.023068188848261114 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.2222222222222222, "acc_stderr": 0.037184890068181146, "acc_norm": 0.2222222222222222, "acc_norm_stderr": 0.037184890068181146 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.31, "acc_stderr": 0.04648231987117316, "acc_norm": 0.31, "acc_norm_stderr": 0.04648231987117316 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.2161290322580645, "acc_stderr": 0.02341529343356852, "acc_norm": 0.2161290322580645, "acc_norm_stderr": 0.02341529343356852 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.2512315270935961, "acc_stderr": 0.030516530732694433, "acc_norm": 0.2512315270935961, "acc_norm_stderr": 0.030516530732694433 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.26, "acc_stderr": 0.044084400227680794, "acc_norm": 0.26, "acc_norm_stderr": 0.044084400227680794 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.2727272727272727, "acc_stderr": 0.03477691162163659, "acc_norm": 0.2727272727272727, "acc_norm_stderr": 0.03477691162163659 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.2222222222222222, "acc_stderr": 0.02962022787479049, "acc_norm": 0.2222222222222222, "acc_norm_stderr": 0.02962022787479049 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.23834196891191708, "acc_stderr": 0.03074890536390988, "acc_norm": 0.23834196891191708, "acc_norm_stderr": 0.03074890536390988 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.28205128205128205, "acc_stderr": 0.0228158130988966, "acc_norm": 0.28205128205128205, "acc_norm_stderr": 0.0228158130988966 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.24074074074074073, "acc_stderr": 0.026067159222275815, "acc_norm": 0.24074074074074073, "acc_norm_stderr": 0.026067159222275815 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.25630252100840334, "acc_stderr": 0.02835962087053395, "acc_norm": 0.25630252100840334, "acc_norm_stderr": 0.02835962087053395 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.23841059602649006, "acc_stderr": 0.03479185572599661, "acc_norm": 0.23841059602649006, "acc_norm_stderr": 0.03479185572599661 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.23669724770642203, "acc_stderr": 0.01822407811729908, "acc_norm": 0.23669724770642203, "acc_norm_stderr": 0.01822407811729908 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.4722222222222222, "acc_stderr": 0.0340470532865388, "acc_norm": 0.4722222222222222, "acc_norm_stderr": 0.0340470532865388 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.29411764705882354, "acc_stderr": 0.03198001660115071, "acc_norm": 0.29411764705882354, "acc_norm_stderr": 0.03198001660115071 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.270042194092827, "acc_stderr": 0.028900721906293426, "acc_norm": 0.270042194092827, "acc_norm_stderr": 0.028900721906293426 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.37668161434977576, "acc_stderr": 0.032521134899291884, "acc_norm": 0.37668161434977576, "acc_norm_stderr": 0.032521134899291884 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.22900763358778625, "acc_stderr": 0.036853466317118506, "acc_norm": 0.22900763358778625, "acc_norm_stderr": 0.036853466317118506 }, "harness|hendrycksTest-international_law|5": { "acc": 0.24793388429752067, "acc_stderr": 0.03941897526516303, "acc_norm": 0.24793388429752067, "acc_norm_stderr": 0.03941897526516303 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.28703703703703703, "acc_stderr": 0.043733130409147614, "acc_norm": 0.28703703703703703, "acc_norm_stderr": 0.043733130409147614 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.3006134969325153, "acc_stderr": 0.03602511318806771, "acc_norm": 0.3006134969325153, "acc_norm_stderr": 0.03602511318806771 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.3125, "acc_stderr": 0.043994650575715215, "acc_norm": 0.3125, "acc_norm_stderr": 0.043994650575715215 }, "harness|hendrycksTest-management|5": { "acc": 0.2621359223300971, "acc_stderr": 0.04354631077260597, "acc_norm": 0.2621359223300971, "acc_norm_stderr": 0.04354631077260597 }, "harness|hendrycksTest-marketing|5": { "acc": 0.2606837606837607, "acc_stderr": 0.028760348956523414, "acc_norm": 0.2606837606837607, "acc_norm_stderr": 0.028760348956523414 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.26, "acc_stderr": 0.044084400227680794, "acc_norm": 0.26, "acc_norm_stderr": 0.044084400227680794 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.2835249042145594, "acc_stderr": 0.016117318166832283, "acc_norm": 0.2835249042145594, "acc_norm_stderr": 0.016117318166832283 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.21676300578034682, "acc_stderr": 0.022183477668412853, "acc_norm": 0.21676300578034682, "acc_norm_stderr": 0.022183477668412853 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.2424581005586592, "acc_stderr": 0.014333522059217889, "acc_norm": 0.2424581005586592, "acc_norm_stderr": 0.014333522059217889 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.238562091503268, "acc_stderr": 0.02440439492808787, "acc_norm": 0.238562091503268, "acc_norm_stderr": 0.02440439492808787 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.3215434083601286, "acc_stderr": 0.026527724079528872, "acc_norm": 0.3215434083601286, "acc_norm_stderr": 0.026527724079528872 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.2716049382716049, "acc_stderr": 0.02474862449053737, "acc_norm": 0.2716049382716049, "acc_norm_stderr": 0.02474862449053737 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.21631205673758866, "acc_stderr": 0.024561720560562796, "acc_norm": 0.21631205673758866, "acc_norm_stderr": 0.024561720560562796 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.23533246414602346, "acc_stderr": 0.010834432543912234, "acc_norm": 0.23533246414602346, "acc_norm_stderr": 0.010834432543912234 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.3639705882352941, "acc_stderr": 0.029227192460032025, "acc_norm": 0.3639705882352941, "acc_norm_stderr": 0.029227192460032025 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.25326797385620914, "acc_stderr": 0.01759348689536683, "acc_norm": 0.25326797385620914, "acc_norm_stderr": 0.01759348689536683 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.2909090909090909, "acc_stderr": 0.04350271442923243, "acc_norm": 0.2909090909090909, "acc_norm_stderr": 0.04350271442923243 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.15510204081632653, "acc_stderr": 0.0231747988612186, "acc_norm": 0.15510204081632653, "acc_norm_stderr": 0.0231747988612186 }, "harness|hendrycksTest-sociology|5": { "acc": 0.23880597014925373, "acc_stderr": 0.030147775935409224, "acc_norm": 0.23880597014925373, "acc_norm_stderr": 0.030147775935409224 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.21, "acc_stderr": 0.040936018074033256, "acc_norm": 0.21, "acc_norm_stderr": 0.040936018074033256 }, "harness|hendrycksTest-virology|5": { "acc": 0.3192771084337349, "acc_stderr": 0.03629335329947861, "acc_norm": 0.3192771084337349, "acc_norm_stderr": 0.03629335329947861 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.2046783625730994, "acc_stderr": 0.03094445977853321, "acc_norm": 0.2046783625730994, "acc_norm_stderr": 0.03094445977853321 }, "harness|truthfulqa:mc|0": { "mc1": 0.20930232558139536, "mc1_stderr": 0.014241219434785828, "mc2": 0.3678523017186956, "mc2_stderr": 0.013764237138063459 }, "harness|winogrande|5": { "acc": 0.6022099447513812, "acc_stderr": 0.013755743513749029 }, "harness|gsm8k|5": { "acc": 0.014404852160727824, "acc_stderr": 0.0032820559171369574 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
open-llm-leaderboard/details_TinyLlama__TinyLlama-1.1B-intermediate-step-1195k-token-2.5T
[ "region:us" ]
2023-12-12T02:56:04+00:00
{"pretty_name": "Evaluation run of TinyLlama/TinyLlama-1.1B-intermediate-step-1195k-token-2.5T", "dataset_summary": "Dataset automatically created during the evaluation run of model [TinyLlama/TinyLlama-1.1B-intermediate-step-1195k-token-2.5T](https://huggingface.co/TinyLlama/TinyLlama-1.1B-intermediate-step-1195k-token-2.5T) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_TinyLlama__TinyLlama-1.1B-intermediate-step-1195k-token-2.5T\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-12T02:53:47.167196](https://huggingface.co/datasets/open-llm-leaderboard/details_TinyLlama__TinyLlama-1.1B-intermediate-step-1195k-token-2.5T/blob/main/results_2023-12-12T02-53-47.167196.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.2675448542417715,\n \"acc_stderr\": 0.03118011664128985,\n \"acc_norm\": 0.2690761116467705,\n \"acc_norm_stderr\": 0.03195175927273723,\n \"mc1\": 0.20930232558139536,\n \"mc1_stderr\": 0.014241219434785828,\n \"mc2\": 0.3678523017186956,\n \"mc2_stderr\": 0.013764237138063459\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.31143344709897613,\n \"acc_stderr\": 0.013532472099850942,\n \"acc_norm\": 0.33532423208191126,\n \"acc_norm_stderr\": 0.013796182947785562\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.4458275243975304,\n \"acc_stderr\": 0.004960408362133243,\n \"acc_norm\": 0.5938060147381,\n \"acc_norm_stderr\": 0.00490117891790085\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.25,\n \"acc_stderr\": 0.04351941398892446,\n \"acc_norm\": 0.25,\n \"acc_norm_stderr\": 0.04351941398892446\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.26666666666666666,\n \"acc_stderr\": 0.03820169914517905,\n \"acc_norm\": 0.26666666666666666,\n \"acc_norm_stderr\": 0.03820169914517905\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.1513157894736842,\n \"acc_stderr\": 0.029162631596843975,\n \"acc_norm\": 0.1513157894736842,\n \"acc_norm_stderr\": 0.029162631596843975\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.26,\n \"acc_stderr\": 0.0440844002276808,\n \"acc_norm\": 0.26,\n \"acc_norm_stderr\": 0.0440844002276808\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.2641509433962264,\n \"acc_stderr\": 0.02713429162874171,\n \"acc_norm\": 0.2641509433962264,\n \"acc_norm_stderr\": 0.02713429162874171\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.2222222222222222,\n \"acc_stderr\": 0.03476590104304134,\n \"acc_norm\": 0.2222222222222222,\n \"acc_norm_stderr\": 0.03476590104304134\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.28,\n \"acc_stderr\": 0.04512608598542127,\n \"acc_norm\": 0.28,\n \"acc_norm_stderr\": 0.04512608598542127\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.29,\n \"acc_stderr\": 0.045604802157206845,\n \"acc_norm\": 0.29,\n \"acc_norm_stderr\": 0.045604802157206845\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.33,\n \"acc_stderr\": 0.047258156262526045,\n \"acc_norm\": 0.33,\n \"acc_norm_stderr\": 0.047258156262526045\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.23699421965317918,\n \"acc_stderr\": 0.03242414757483098,\n \"acc_norm\": 0.23699421965317918,\n \"acc_norm_stderr\": 0.03242414757483098\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.21568627450980393,\n \"acc_stderr\": 0.04092563958237656,\n \"acc_norm\": 0.21568627450980393,\n \"acc_norm_stderr\": 0.04092563958237656\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.23,\n \"acc_stderr\": 0.042295258468165065,\n \"acc_norm\": 0.23,\n \"acc_norm_stderr\": 0.042295258468165065\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.2936170212765957,\n \"acc_stderr\": 0.029771642712491227,\n \"acc_norm\": 0.2936170212765957,\n \"acc_norm_stderr\": 0.029771642712491227\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.2543859649122807,\n \"acc_stderr\": 0.04096985139843672,\n \"acc_norm\": 0.2543859649122807,\n \"acc_norm_stderr\": 0.04096985139843672\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.2413793103448276,\n \"acc_stderr\": 0.03565998174135303,\n \"acc_norm\": 0.2413793103448276,\n \"acc_norm_stderr\": 0.03565998174135303\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.2777777777777778,\n \"acc_stderr\": 0.023068188848261114,\n \"acc_norm\": 0.2777777777777778,\n \"acc_norm_stderr\": 0.023068188848261114\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.2222222222222222,\n \"acc_stderr\": 0.037184890068181146,\n \"acc_norm\": 0.2222222222222222,\n \"acc_norm_stderr\": 0.037184890068181146\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.31,\n \"acc_stderr\": 0.04648231987117316,\n \"acc_norm\": 0.31,\n \"acc_norm_stderr\": 0.04648231987117316\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.2161290322580645,\n \"acc_stderr\": 0.02341529343356852,\n \"acc_norm\": 0.2161290322580645,\n \"acc_norm_stderr\": 0.02341529343356852\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.2512315270935961,\n \"acc_stderr\": 0.030516530732694433,\n \"acc_norm\": 0.2512315270935961,\n \"acc_norm_stderr\": 0.030516530732694433\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.26,\n \"acc_stderr\": 0.044084400227680794,\n \"acc_norm\": 0.26,\n \"acc_norm_stderr\": 0.044084400227680794\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.2727272727272727,\n \"acc_stderr\": 0.03477691162163659,\n \"acc_norm\": 0.2727272727272727,\n \"acc_norm_stderr\": 0.03477691162163659\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.2222222222222222,\n \"acc_stderr\": 0.02962022787479049,\n \"acc_norm\": 0.2222222222222222,\n \"acc_norm_stderr\": 0.02962022787479049\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.23834196891191708,\n \"acc_stderr\": 0.03074890536390988,\n \"acc_norm\": 0.23834196891191708,\n \"acc_norm_stderr\": 0.03074890536390988\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.28205128205128205,\n \"acc_stderr\": 0.0228158130988966,\n \"acc_norm\": 0.28205128205128205,\n \"acc_norm_stderr\": 0.0228158130988966\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.24074074074074073,\n \"acc_stderr\": 0.026067159222275815,\n \"acc_norm\": 0.24074074074074073,\n \"acc_norm_stderr\": 0.026067159222275815\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.25630252100840334,\n \"acc_stderr\": 0.02835962087053395,\n \"acc_norm\": 0.25630252100840334,\n \"acc_norm_stderr\": 0.02835962087053395\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.23841059602649006,\n \"acc_stderr\": 0.03479185572599661,\n \"acc_norm\": 0.23841059602649006,\n \"acc_norm_stderr\": 0.03479185572599661\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.23669724770642203,\n \"acc_stderr\": 0.01822407811729908,\n \"acc_norm\": 0.23669724770642203,\n \"acc_norm_stderr\": 0.01822407811729908\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.4722222222222222,\n \"acc_stderr\": 0.0340470532865388,\n \"acc_norm\": 0.4722222222222222,\n \"acc_norm_stderr\": 0.0340470532865388\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.29411764705882354,\n \"acc_stderr\": 0.03198001660115071,\n \"acc_norm\": 0.29411764705882354,\n \"acc_norm_stderr\": 0.03198001660115071\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.270042194092827,\n \"acc_stderr\": 0.028900721906293426,\n \"acc_norm\": 0.270042194092827,\n \"acc_norm_stderr\": 0.028900721906293426\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.37668161434977576,\n \"acc_stderr\": 0.032521134899291884,\n \"acc_norm\": 0.37668161434977576,\n \"acc_norm_stderr\": 0.032521134899291884\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.22900763358778625,\n \"acc_stderr\": 0.036853466317118506,\n \"acc_norm\": 0.22900763358778625,\n \"acc_norm_stderr\": 0.036853466317118506\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.24793388429752067,\n \"acc_stderr\": 0.03941897526516303,\n \"acc_norm\": 0.24793388429752067,\n \"acc_norm_stderr\": 0.03941897526516303\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.28703703703703703,\n \"acc_stderr\": 0.043733130409147614,\n \"acc_norm\": 0.28703703703703703,\n \"acc_norm_stderr\": 0.043733130409147614\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.3006134969325153,\n \"acc_stderr\": 0.03602511318806771,\n \"acc_norm\": 0.3006134969325153,\n \"acc_norm_stderr\": 0.03602511318806771\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.3125,\n \"acc_stderr\": 0.043994650575715215,\n \"acc_norm\": 0.3125,\n \"acc_norm_stderr\": 0.043994650575715215\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.2621359223300971,\n \"acc_stderr\": 0.04354631077260597,\n \"acc_norm\": 0.2621359223300971,\n \"acc_norm_stderr\": 0.04354631077260597\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.2606837606837607,\n \"acc_stderr\": 0.028760348956523414,\n \"acc_norm\": 0.2606837606837607,\n \"acc_norm_stderr\": 0.028760348956523414\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.26,\n \"acc_stderr\": 0.044084400227680794,\n \"acc_norm\": 0.26,\n \"acc_norm_stderr\": 0.044084400227680794\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.2835249042145594,\n \"acc_stderr\": 0.016117318166832283,\n \"acc_norm\": 0.2835249042145594,\n \"acc_norm_stderr\": 0.016117318166832283\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.21676300578034682,\n \"acc_stderr\": 0.022183477668412853,\n \"acc_norm\": 0.21676300578034682,\n \"acc_norm_stderr\": 0.022183477668412853\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.2424581005586592,\n \"acc_stderr\": 0.014333522059217889,\n \"acc_norm\": 0.2424581005586592,\n \"acc_norm_stderr\": 0.014333522059217889\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.238562091503268,\n \"acc_stderr\": 0.02440439492808787,\n \"acc_norm\": 0.238562091503268,\n \"acc_norm_stderr\": 0.02440439492808787\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.3215434083601286,\n \"acc_stderr\": 0.026527724079528872,\n \"acc_norm\": 0.3215434083601286,\n \"acc_norm_stderr\": 0.026527724079528872\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.2716049382716049,\n \"acc_stderr\": 0.02474862449053737,\n \"acc_norm\": 0.2716049382716049,\n \"acc_norm_stderr\": 0.02474862449053737\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.21631205673758866,\n \"acc_stderr\": 0.024561720560562796,\n \"acc_norm\": 0.21631205673758866,\n \"acc_norm_stderr\": 0.024561720560562796\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.23533246414602346,\n \"acc_stderr\": 0.010834432543912234,\n \"acc_norm\": 0.23533246414602346,\n \"acc_norm_stderr\": 0.010834432543912234\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.3639705882352941,\n \"acc_stderr\": 0.029227192460032025,\n \"acc_norm\": 0.3639705882352941,\n \"acc_norm_stderr\": 0.029227192460032025\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.25326797385620914,\n \"acc_stderr\": 0.01759348689536683,\n \"acc_norm\": 0.25326797385620914,\n \"acc_norm_stderr\": 0.01759348689536683\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.2909090909090909,\n \"acc_stderr\": 0.04350271442923243,\n \"acc_norm\": 0.2909090909090909,\n \"acc_norm_stderr\": 0.04350271442923243\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.15510204081632653,\n \"acc_stderr\": 0.0231747988612186,\n \"acc_norm\": 0.15510204081632653,\n \"acc_norm_stderr\": 0.0231747988612186\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.23880597014925373,\n \"acc_stderr\": 0.030147775935409224,\n \"acc_norm\": 0.23880597014925373,\n \"acc_norm_stderr\": 0.030147775935409224\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.21,\n \"acc_stderr\": 0.040936018074033256,\n \"acc_norm\": 0.21,\n \"acc_norm_stderr\": 0.040936018074033256\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.3192771084337349,\n \"acc_stderr\": 0.03629335329947861,\n \"acc_norm\": 0.3192771084337349,\n \"acc_norm_stderr\": 0.03629335329947861\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.2046783625730994,\n \"acc_stderr\": 0.03094445977853321,\n \"acc_norm\": 0.2046783625730994,\n \"acc_norm_stderr\": 0.03094445977853321\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.20930232558139536,\n \"mc1_stderr\": 0.014241219434785828,\n \"mc2\": 0.3678523017186956,\n \"mc2_stderr\": 0.013764237138063459\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.6022099447513812,\n \"acc_stderr\": 0.013755743513749029\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.014404852160727824,\n \"acc_stderr\": 0.0032820559171369574\n }\n}\n```", "repo_url": "https://huggingface.co/TinyLlama/TinyLlama-1.1B-intermediate-step-1195k-token-2.5T", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|arc:challenge|25_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|gsm8k|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hellaswag|10_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T02-53-47.167196.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["**/details_harness|winogrande|5_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-12T02-53-47.167196.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_12T02_53_47.167196", "path": ["results_2023-12-12T02-53-47.167196.parquet"]}, {"split": "latest", "path": ["results_2023-12-12T02-53-47.167196.parquet"]}]}]}
2023-12-12T02:56:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of TinyLlama/TinyLlama-1.1B-intermediate-step-1195k-token-2.5T Dataset automatically created during the evaluation run of model TinyLlama/TinyLlama-1.1B-intermediate-step-1195k-token-2.5T on the Open LLM Leaderboard. The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-12T02:53:47.167196(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Evaluation run of TinyLlama/TinyLlama-1.1B-intermediate-step-1195k-token-2.5T\n\n\n\nDataset automatically created during the evaluation run of model TinyLlama/TinyLlama-1.1B-intermediate-step-1195k-token-2.5T on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T02:53:47.167196(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of TinyLlama/TinyLlama-1.1B-intermediate-step-1195k-token-2.5T\n\n\n\nDataset automatically created during the evaluation run of model TinyLlama/TinyLlama-1.1B-intermediate-step-1195k-token-2.5T on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T02:53:47.167196(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 215, 67, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of TinyLlama/TinyLlama-1.1B-intermediate-step-1195k-token-2.5T\n\n\n\nDataset automatically created during the evaluation run of model TinyLlama/TinyLlama-1.1B-intermediate-step-1195k-token-2.5T on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-12T02:53:47.167196(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:" ]
987486e17914f56a299ed62d4554135e20d93507
- original dataset: [korean data from kaist-ai/Multilingual-CoT-Collection](https://huggingface.co/datasets/kaist-ai/Multilingual-CoT-Collection)
heegyu/CoT-collection-ko
[ "license:cc-by-4.0", "region:us" ]
2023-12-12T03:16:34+00:00
{"license": "cc-by-4.0"}
2023-12-12T03:20:48+00:00
[]
[]
TAGS #license-cc-by-4.0 #region-us
- original dataset: korean data from kaist-ai/Multilingual-CoT-Collection
[]
[ "TAGS\n#license-cc-by-4.0 #region-us \n" ]
[ 15 ]
[ "passage: TAGS\n#license-cc-by-4.0 #region-us \n" ]
7cc25512ddc61d791f17a7ac0cda3f6a9251da59
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
Lask8/gradio-lipsync-wav2lip
[ "license:apache-2.0", "region:us" ]
2023-12-12T03:28:46+00:00
{"license": "apache-2.0", "title": "Gradio Lipsync Wav2lip", "emoji": "\ud83d\udc44", "colorFrom": "indigo", "colorTo": "blue", "sdk": "gradio", "python_version": 3.8, "sdk_version": "3.40.1", "suggested_hardware": "t4-medium", "app_file": "app.py", "pinned": false}
2023-12-12T03:50:04+00:00
[]
[]
TAGS #license-apache-2.0 #region-us
Check out the configuration reference at URL
[]
[ "TAGS\n#license-apache-2.0 #region-us \n" ]
[ 14 ]
[ "passage: TAGS\n#license-apache-2.0 #region-us \n" ]
703a2a8741e862f4491cfd365eb402428d95262b
# Dataset Card for Evaluation run of janhq/supermario-slerp <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [janhq/supermario-slerp](https://huggingface.co/janhq/supermario-slerp) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_janhq__supermario-slerp", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-12T03:37:12.561456](https://huggingface.co/datasets/open-llm-leaderboard/details_janhq__supermario-slerp/blob/main/results_2023-12-12T03-37-12.561456.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.6537706437546362, "acc_stderr": 0.031995288620737625, "acc_norm": 0.6536617491214738, "acc_norm_stderr": 0.03265751997212851, "mc1": 0.43818849449204406, "mc1_stderr": 0.017369236164404438, "mc2": 0.6010546669403226, "mc2_stderr": 0.015217019912801435 }, "harness|arc:challenge|25": { "acc": 0.6578498293515358, "acc_stderr": 0.013864152159177278, "acc_norm": 0.689419795221843, "acc_norm_stderr": 0.013522292098053069 }, "harness|hellaswag|10": { "acc": 0.6773551085441147, "acc_stderr": 0.004665327309399185, "acc_norm": 0.8657637920732921, "acc_norm_stderr": 0.0034020920763237457 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.32, "acc_stderr": 0.04688261722621504, "acc_norm": 0.32, "acc_norm_stderr": 0.04688261722621504 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.6370370370370371, "acc_stderr": 0.041539484047423976, "acc_norm": 0.6370370370370371, "acc_norm_stderr": 0.041539484047423976 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.7236842105263158, "acc_stderr": 0.03639057569952929, "acc_norm": 0.7236842105263158, "acc_norm_stderr": 0.03639057569952929 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.64, "acc_stderr": 0.04824181513244218, "acc_norm": 0.64, "acc_norm_stderr": 0.04824181513244218 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.720754716981132, "acc_stderr": 0.027611163402399715, "acc_norm": 0.720754716981132, "acc_norm_stderr": 0.027611163402399715 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.7708333333333334, "acc_stderr": 0.03514697467862388, "acc_norm": 0.7708333333333334, "acc_norm_stderr": 0.03514697467862388 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.46, "acc_stderr": 0.05009082659620333, "acc_norm": 0.46, "acc_norm_stderr": 0.05009082659620333 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.53, "acc_stderr": 0.050161355804659205, "acc_norm": 0.53, "acc_norm_stderr": 0.050161355804659205 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.34, "acc_stderr": 0.04760952285695235, "acc_norm": 0.34, "acc_norm_stderr": 0.04760952285695235 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.6705202312138728, "acc_stderr": 0.03583901754736412, "acc_norm": 0.6705202312138728, "acc_norm_stderr": 0.03583901754736412 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.39215686274509803, "acc_stderr": 0.048580835742663454, "acc_norm": 0.39215686274509803, "acc_norm_stderr": 0.048580835742663454 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.77, "acc_stderr": 0.042295258468165065, "acc_norm": 0.77, "acc_norm_stderr": 0.042295258468165065 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.5957446808510638, "acc_stderr": 0.03208115750788684, "acc_norm": 0.5957446808510638, "acc_norm_stderr": 0.03208115750788684 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.4824561403508772, "acc_stderr": 0.04700708033551038, "acc_norm": 0.4824561403508772, "acc_norm_stderr": 0.04700708033551038 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.5448275862068965, "acc_stderr": 0.04149886942192117, "acc_norm": 0.5448275862068965, "acc_norm_stderr": 0.04149886942192117 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.42592592592592593, "acc_stderr": 0.02546714904546955, "acc_norm": 0.42592592592592593, "acc_norm_stderr": 0.02546714904546955 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.4603174603174603, "acc_stderr": 0.04458029125470973, "acc_norm": 0.4603174603174603, "acc_norm_stderr": 0.04458029125470973 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.36, "acc_stderr": 0.048241815132442176, "acc_norm": 0.36, "acc_norm_stderr": 0.048241815132442176 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.7741935483870968, "acc_stderr": 0.023785577884181015, "acc_norm": 0.7741935483870968, "acc_norm_stderr": 0.023785577884181015 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.5024630541871922, "acc_stderr": 0.035179450386910616, "acc_norm": 0.5024630541871922, "acc_norm_stderr": 0.035179450386910616 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.7, "acc_stderr": 0.046056618647183814, "acc_norm": 0.7, "acc_norm_stderr": 0.046056618647183814 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.7757575757575758, "acc_stderr": 0.03256866661681102, "acc_norm": 0.7757575757575758, "acc_norm_stderr": 0.03256866661681102 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.7878787878787878, "acc_stderr": 0.029126522834586818, "acc_norm": 0.7878787878787878, "acc_norm_stderr": 0.029126522834586818 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.9015544041450777, "acc_stderr": 0.021500249576033456, "acc_norm": 0.9015544041450777, "acc_norm_stderr": 0.021500249576033456 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.6641025641025641, "acc_stderr": 0.023946724741563976, "acc_norm": 0.6641025641025641, "acc_norm_stderr": 0.023946724741563976 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.35185185185185186, "acc_stderr": 0.029116617606083004, "acc_norm": 0.35185185185185186, "acc_norm_stderr": 0.029116617606083004 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.6974789915966386, "acc_stderr": 0.02983796238829194, "acc_norm": 0.6974789915966386, "acc_norm_stderr": 0.02983796238829194 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.33112582781456956, "acc_stderr": 0.038425817186598696, "acc_norm": 0.33112582781456956, "acc_norm_stderr": 0.038425817186598696 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.8568807339449541, "acc_stderr": 0.015014462497168589, "acc_norm": 0.8568807339449541, "acc_norm_stderr": 0.015014462497168589 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.5416666666666666, "acc_stderr": 0.03398110890294636, "acc_norm": 0.5416666666666666, "acc_norm_stderr": 0.03398110890294636 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.8235294117647058, "acc_stderr": 0.026756401538078966, "acc_norm": 0.8235294117647058, "acc_norm_stderr": 0.026756401538078966 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.8143459915611815, "acc_stderr": 0.025310495376944867, "acc_norm": 0.8143459915611815, "acc_norm_stderr": 0.025310495376944867 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.6860986547085202, "acc_stderr": 0.031146796482972465, "acc_norm": 0.6860986547085202, "acc_norm_stderr": 0.031146796482972465 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.7938931297709924, "acc_stderr": 0.03547771004159465, "acc_norm": 0.7938931297709924, "acc_norm_stderr": 0.03547771004159465 }, "harness|hendrycksTest-international_law|5": { "acc": 0.7933884297520661, "acc_stderr": 0.03695980128098824, "acc_norm": 0.7933884297520661, "acc_norm_stderr": 0.03695980128098824 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.8055555555555556, "acc_stderr": 0.038260763248848646, "acc_norm": 0.8055555555555556, "acc_norm_stderr": 0.038260763248848646 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.7668711656441718, "acc_stderr": 0.0332201579577674, "acc_norm": 0.7668711656441718, "acc_norm_stderr": 0.0332201579577674 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.44642857142857145, "acc_stderr": 0.04718471485219588, "acc_norm": 0.44642857142857145, "acc_norm_stderr": 0.04718471485219588 }, "harness|hendrycksTest-management|5": { "acc": 0.7864077669902912, "acc_stderr": 0.040580420156460344, "acc_norm": 0.7864077669902912, "acc_norm_stderr": 0.040580420156460344 }, "harness|hendrycksTest-marketing|5": { "acc": 0.8717948717948718, "acc_stderr": 0.02190190511507333, "acc_norm": 0.8717948717948718, "acc_norm_stderr": 0.02190190511507333 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.75, "acc_stderr": 0.04351941398892446, "acc_norm": 0.75, "acc_norm_stderr": 0.04351941398892446 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.8339719029374202, "acc_stderr": 0.013306478243066302, "acc_norm": 0.8339719029374202, "acc_norm_stderr": 0.013306478243066302 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.7427745664739884, "acc_stderr": 0.02353292543104429, "acc_norm": 0.7427745664739884, "acc_norm_stderr": 0.02353292543104429 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.39553072625698327, "acc_stderr": 0.016353415410075775, "acc_norm": 0.39553072625698327, "acc_norm_stderr": 0.016353415410075775 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.7287581699346405, "acc_stderr": 0.025457756696667885, "acc_norm": 0.7287581699346405, "acc_norm_stderr": 0.025457756696667885 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.7106109324758842, "acc_stderr": 0.025755865922632945, "acc_norm": 0.7106109324758842, "acc_norm_stderr": 0.025755865922632945 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.75, "acc_stderr": 0.02409347123262133, "acc_norm": 0.75, "acc_norm_stderr": 0.02409347123262133 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.48936170212765956, "acc_stderr": 0.02982074719142248, "acc_norm": 0.48936170212765956, "acc_norm_stderr": 0.02982074719142248 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.47131681877444587, "acc_stderr": 0.012749206007657474, "acc_norm": 0.47131681877444587, "acc_norm_stderr": 0.012749206007657474 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.6838235294117647, "acc_stderr": 0.028245687391462927, "acc_norm": 0.6838235294117647, "acc_norm_stderr": 0.028245687391462927 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.6830065359477124, "acc_stderr": 0.018824219512706207, "acc_norm": 0.6830065359477124, "acc_norm_stderr": 0.018824219512706207 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.6818181818181818, "acc_stderr": 0.044612721759105085, "acc_norm": 0.6818181818181818, "acc_norm_stderr": 0.044612721759105085 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.726530612244898, "acc_stderr": 0.028535560337128445, "acc_norm": 0.726530612244898, "acc_norm_stderr": 0.028535560337128445 }, "harness|hendrycksTest-sociology|5": { "acc": 0.8407960199004975, "acc_stderr": 0.02587064676616913, "acc_norm": 0.8407960199004975, "acc_norm_stderr": 0.02587064676616913 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.85, "acc_stderr": 0.0358870281282637, "acc_norm": 0.85, "acc_norm_stderr": 0.0358870281282637 }, "harness|hendrycksTest-virology|5": { "acc": 0.5421686746987951, "acc_stderr": 0.0387862677100236, "acc_norm": 0.5421686746987951, "acc_norm_stderr": 0.0387862677100236 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.8128654970760234, "acc_stderr": 0.02991312723236804, "acc_norm": 0.8128654970760234, "acc_norm_stderr": 0.02991312723236804 }, "harness|truthfulqa:mc|0": { "mc1": 0.43818849449204406, "mc1_stderr": 0.017369236164404438, "mc2": 0.6010546669403226, "mc2_stderr": 0.015217019912801435 }, "harness|winogrande|5": { "acc": 0.8129439621152328, "acc_stderr": 0.01095971643524291 }, "harness|gsm8k|5": { "acc": 0.7210007581501138, "acc_stderr": 0.012354115779970306 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
open-llm-leaderboard/details_janhq__supermario-slerp
[ "region:us" ]
2023-12-12T03:40:07+00:00
{"pretty_name": "Evaluation run of janhq/supermario-slerp", "dataset_summary": "Dataset automatically created during the evaluation run of model [janhq/supermario-slerp](https://huggingface.co/janhq/supermario-slerp) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_janhq__supermario-slerp\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-12T03:37:12.561456](https://huggingface.co/datasets/open-llm-leaderboard/details_janhq__supermario-slerp/blob/main/results_2023-12-12T03-37-12.561456.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.6537706437546362,\n \"acc_stderr\": 0.031995288620737625,\n \"acc_norm\": 0.6536617491214738,\n \"acc_norm_stderr\": 0.03265751997212851,\n \"mc1\": 0.43818849449204406,\n \"mc1_stderr\": 0.017369236164404438,\n \"mc2\": 0.6010546669403226,\n \"mc2_stderr\": 0.015217019912801435\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.6578498293515358,\n \"acc_stderr\": 0.013864152159177278,\n \"acc_norm\": 0.689419795221843,\n \"acc_norm_stderr\": 0.013522292098053069\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6773551085441147,\n \"acc_stderr\": 0.004665327309399185,\n \"acc_norm\": 0.8657637920732921,\n \"acc_norm_stderr\": 0.0034020920763237457\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.32,\n \"acc_stderr\": 0.04688261722621504,\n \"acc_norm\": 0.32,\n \"acc_norm_stderr\": 0.04688261722621504\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.6370370370370371,\n \"acc_stderr\": 0.041539484047423976,\n \"acc_norm\": 0.6370370370370371,\n \"acc_norm_stderr\": 0.041539484047423976\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.7236842105263158,\n \"acc_stderr\": 0.03639057569952929,\n \"acc_norm\": 0.7236842105263158,\n \"acc_norm_stderr\": 0.03639057569952929\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.64,\n \"acc_stderr\": 0.04824181513244218,\n \"acc_norm\": 0.64,\n \"acc_norm_stderr\": 0.04824181513244218\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.720754716981132,\n \"acc_stderr\": 0.027611163402399715,\n \"acc_norm\": 0.720754716981132,\n \"acc_norm_stderr\": 0.027611163402399715\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.7708333333333334,\n \"acc_stderr\": 0.03514697467862388,\n \"acc_norm\": 0.7708333333333334,\n \"acc_norm_stderr\": 0.03514697467862388\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.46,\n \"acc_stderr\": 0.05009082659620333,\n \"acc_norm\": 0.46,\n \"acc_norm_stderr\": 0.05009082659620333\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.53,\n \"acc_stderr\": 0.050161355804659205,\n \"acc_norm\": 0.53,\n \"acc_norm_stderr\": 0.050161355804659205\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.34,\n \"acc_stderr\": 0.04760952285695235,\n \"acc_norm\": 0.34,\n \"acc_norm_stderr\": 0.04760952285695235\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.6705202312138728,\n \"acc_stderr\": 0.03583901754736412,\n \"acc_norm\": 0.6705202312138728,\n \"acc_norm_stderr\": 0.03583901754736412\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.39215686274509803,\n \"acc_stderr\": 0.048580835742663454,\n \"acc_norm\": 0.39215686274509803,\n \"acc_norm_stderr\": 0.048580835742663454\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.77,\n \"acc_stderr\": 0.042295258468165065,\n \"acc_norm\": 0.77,\n \"acc_norm_stderr\": 0.042295258468165065\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.5957446808510638,\n \"acc_stderr\": 0.03208115750788684,\n \"acc_norm\": 0.5957446808510638,\n \"acc_norm_stderr\": 0.03208115750788684\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.4824561403508772,\n \"acc_stderr\": 0.04700708033551038,\n \"acc_norm\": 0.4824561403508772,\n \"acc_norm_stderr\": 0.04700708033551038\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.5448275862068965,\n \"acc_stderr\": 0.04149886942192117,\n \"acc_norm\": 0.5448275862068965,\n \"acc_norm_stderr\": 0.04149886942192117\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.42592592592592593,\n \"acc_stderr\": 0.02546714904546955,\n \"acc_norm\": 0.42592592592592593,\n \"acc_norm_stderr\": 0.02546714904546955\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.4603174603174603,\n \"acc_stderr\": 0.04458029125470973,\n \"acc_norm\": 0.4603174603174603,\n \"acc_norm_stderr\": 0.04458029125470973\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.36,\n \"acc_stderr\": 0.048241815132442176,\n \"acc_norm\": 0.36,\n \"acc_norm_stderr\": 0.048241815132442176\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.7741935483870968,\n \"acc_stderr\": 0.023785577884181015,\n \"acc_norm\": 0.7741935483870968,\n \"acc_norm_stderr\": 0.023785577884181015\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.5024630541871922,\n \"acc_stderr\": 0.035179450386910616,\n \"acc_norm\": 0.5024630541871922,\n \"acc_norm_stderr\": 0.035179450386910616\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.7,\n \"acc_stderr\": 0.046056618647183814,\n \"acc_norm\": 0.7,\n \"acc_norm_stderr\": 0.046056618647183814\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.7757575757575758,\n \"acc_stderr\": 0.03256866661681102,\n \"acc_norm\": 0.7757575757575758,\n \"acc_norm_stderr\": 0.03256866661681102\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.7878787878787878,\n \"acc_stderr\": 0.029126522834586818,\n \"acc_norm\": 0.7878787878787878,\n \"acc_norm_stderr\": 0.029126522834586818\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.9015544041450777,\n \"acc_stderr\": 0.021500249576033456,\n \"acc_norm\": 0.9015544041450777,\n \"acc_norm_stderr\": 0.021500249576033456\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.6641025641025641,\n \"acc_stderr\": 0.023946724741563976,\n \"acc_norm\": 0.6641025641025641,\n \"acc_norm_stderr\": 0.023946724741563976\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.35185185185185186,\n \"acc_stderr\": 0.029116617606083004,\n \"acc_norm\": 0.35185185185185186,\n \"acc_norm_stderr\": 0.029116617606083004\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.6974789915966386,\n \"acc_stderr\": 0.02983796238829194,\n \"acc_norm\": 0.6974789915966386,\n \"acc_norm_stderr\": 0.02983796238829194\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.33112582781456956,\n \"acc_stderr\": 0.038425817186598696,\n \"acc_norm\": 0.33112582781456956,\n \"acc_norm_stderr\": 0.038425817186598696\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.8568807339449541,\n \"acc_stderr\": 0.015014462497168589,\n \"acc_norm\": 0.8568807339449541,\n \"acc_norm_stderr\": 0.015014462497168589\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.5416666666666666,\n \"acc_stderr\": 0.03398110890294636,\n \"acc_norm\": 0.5416666666666666,\n \"acc_norm_stderr\": 0.03398110890294636\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.8235294117647058,\n \"acc_stderr\": 0.026756401538078966,\n \"acc_norm\": 0.8235294117647058,\n \"acc_norm_stderr\": 0.026756401538078966\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.8143459915611815,\n \"acc_stderr\": 0.025310495376944867,\n \"acc_norm\": 0.8143459915611815,\n \"acc_norm_stderr\": 0.025310495376944867\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.6860986547085202,\n \"acc_stderr\": 0.031146796482972465,\n \"acc_norm\": 0.6860986547085202,\n \"acc_norm_stderr\": 0.031146796482972465\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.7938931297709924,\n \"acc_stderr\": 0.03547771004159465,\n \"acc_norm\": 0.7938931297709924,\n \"acc_norm_stderr\": 0.03547771004159465\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.7933884297520661,\n \"acc_stderr\": 0.03695980128098824,\n \"acc_norm\": 0.7933884297520661,\n \"acc_norm_stderr\": 0.03695980128098824\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.8055555555555556,\n \"acc_stderr\": 0.038260763248848646,\n \"acc_norm\": 0.8055555555555556,\n \"acc_norm_stderr\": 0.038260763248848646\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.7668711656441718,\n \"acc_stderr\": 0.0332201579577674,\n \"acc_norm\": 0.7668711656441718,\n \"acc_norm_stderr\": 0.0332201579577674\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.44642857142857145,\n \"acc_stderr\": 0.04718471485219588,\n \"acc_norm\": 0.44642857142857145,\n \"acc_norm_stderr\": 0.04718471485219588\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.7864077669902912,\n \"acc_stderr\": 0.040580420156460344,\n \"acc_norm\": 0.7864077669902912,\n \"acc_norm_stderr\": 0.040580420156460344\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.8717948717948718,\n \"acc_stderr\": 0.02190190511507333,\n \"acc_norm\": 0.8717948717948718,\n \"acc_norm_stderr\": 0.02190190511507333\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.75,\n \"acc_stderr\": 0.04351941398892446,\n \"acc_norm\": 0.75,\n \"acc_norm_stderr\": 0.04351941398892446\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.8339719029374202,\n \"acc_stderr\": 0.013306478243066302,\n \"acc_norm\": 0.8339719029374202,\n \"acc_norm_stderr\": 0.013306478243066302\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.7427745664739884,\n \"acc_stderr\": 0.02353292543104429,\n \"acc_norm\": 0.7427745664739884,\n \"acc_norm_stderr\": 0.02353292543104429\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.39553072625698327,\n \"acc_stderr\": 0.016353415410075775,\n \"acc_norm\": 0.39553072625698327,\n \"acc_norm_stderr\": 0.016353415410075775\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.7287581699346405,\n \"acc_stderr\": 0.025457756696667885,\n \"acc_norm\": 0.7287581699346405,\n \"acc_norm_stderr\": 0.025457756696667885\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.7106109324758842,\n \"acc_stderr\": 0.025755865922632945,\n \"acc_norm\": 0.7106109324758842,\n \"acc_norm_stderr\": 0.025755865922632945\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.75,\n \"acc_stderr\": 0.02409347123262133,\n \"acc_norm\": 0.75,\n \"acc_norm_stderr\": 0.02409347123262133\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.48936170212765956,\n \"acc_stderr\": 0.02982074719142248,\n \"acc_norm\": 0.48936170212765956,\n \"acc_norm_stderr\": 0.02982074719142248\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.47131681877444587,\n \"acc_stderr\": 0.012749206007657474,\n \"acc_norm\": 0.47131681877444587,\n \"acc_norm_stderr\": 0.012749206007657474\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.6838235294117647,\n \"acc_stderr\": 0.028245687391462927,\n \"acc_norm\": 0.6838235294117647,\n \"acc_norm_stderr\": 0.028245687391462927\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.6830065359477124,\n \"acc_stderr\": 0.018824219512706207,\n \"acc_norm\": 0.6830065359477124,\n \"acc_norm_stderr\": 0.018824219512706207\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.6818181818181818,\n \"acc_stderr\": 0.044612721759105085,\n \"acc_norm\": 0.6818181818181818,\n \"acc_norm_stderr\": 0.044612721759105085\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.726530612244898,\n \"acc_stderr\": 0.028535560337128445,\n \"acc_norm\": 0.726530612244898,\n \"acc_norm_stderr\": 0.028535560337128445\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.8407960199004975,\n \"acc_stderr\": 0.02587064676616913,\n \"acc_norm\": 0.8407960199004975,\n \"acc_norm_stderr\": 0.02587064676616913\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.85,\n \"acc_stderr\": 0.0358870281282637,\n \"acc_norm\": 0.85,\n \"acc_norm_stderr\": 0.0358870281282637\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.5421686746987951,\n \"acc_stderr\": 0.0387862677100236,\n \"acc_norm\": 0.5421686746987951,\n \"acc_norm_stderr\": 0.0387862677100236\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.8128654970760234,\n \"acc_stderr\": 0.02991312723236804,\n \"acc_norm\": 0.8128654970760234,\n \"acc_norm_stderr\": 0.02991312723236804\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.43818849449204406,\n \"mc1_stderr\": 0.017369236164404438,\n \"mc2\": 0.6010546669403226,\n \"mc2_stderr\": 0.015217019912801435\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.8129439621152328,\n \"acc_stderr\": 0.01095971643524291\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.7210007581501138,\n \"acc_stderr\": 0.012354115779970306\n }\n}\n```", "repo_url": "https://huggingface.co/janhq/supermario-slerp", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|arc:challenge|25_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|gsm8k|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hellaswag|10_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T03-37-12.561456.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["**/details_harness|winogrande|5_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-12T03-37-12.561456.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_12T03_37_12.561456", "path": ["results_2023-12-12T03-37-12.561456.parquet"]}, {"split": "latest", "path": ["results_2023-12-12T03-37-12.561456.parquet"]}]}]}
2023-12-12T03:40:50+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of janhq/supermario-slerp Dataset automatically created during the evaluation run of model janhq/supermario-slerp on the Open LLM Leaderboard. The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-12T03:37:12.561456(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Evaluation run of janhq/supermario-slerp\n\n\n\nDataset automatically created during the evaluation run of model janhq/supermario-slerp on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T03:37:12.561456(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of janhq/supermario-slerp\n\n\n\nDataset automatically created during the evaluation run of model janhq/supermario-slerp on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T03:37:12.561456(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 181, 66, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of janhq/supermario-slerp\n\n\n\nDataset automatically created during the evaluation run of model janhq/supermario-slerp on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-12T03:37:12.561456(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
854bfd2152288daa47fdc595514b6184ab557f40
# Dataset Card for Evaluation run of mistralai/Mistral-7B-Instruct-v0.2 <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [mistralai/Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_mistralai__Mistral-7B-Instruct-v0.2", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-12T03:37:50.599841](https://huggingface.co/datasets/open-llm-leaderboard/details_mistralai__Mistral-7B-Instruct-v0.2/blob/main/results_2023-12-12T03-37-50.599841.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.6077550413417533, "acc_stderr": 0.03310328786623656, "acc_norm": 0.6122661125091963, "acc_norm_stderr": 0.03377303167526721, "mc1": 0.5275397796817626, "mc1_stderr": 0.01747693019071219, "mc2": 0.6825629969752945, "mc2_stderr": 0.015176655501749976 }, "harness|arc:challenge|25": { "acc": 0.5895904436860068, "acc_stderr": 0.014374922192642664, "acc_norm": 0.6313993174061433, "acc_norm_stderr": 0.014097810678042203 }, "harness|hellaswag|10": { "acc": 0.6677952599083847, "acc_stderr": 0.0047004138249425636, "acc_norm": 0.8488348934475204, "acc_norm_stderr": 0.003574776594108505 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.32, "acc_stderr": 0.046882617226215034, "acc_norm": 0.32, "acc_norm_stderr": 0.046882617226215034 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.5703703703703704, "acc_stderr": 0.042763494943765995, "acc_norm": 0.5703703703703704, "acc_norm_stderr": 0.042763494943765995 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.625, "acc_stderr": 0.039397364351956274, "acc_norm": 0.625, "acc_norm_stderr": 0.039397364351956274 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.6, "acc_stderr": 0.04923659639173309, "acc_norm": 0.6, "acc_norm_stderr": 0.04923659639173309 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.6716981132075471, "acc_stderr": 0.02890159361241178, "acc_norm": 0.6716981132075471, "acc_norm_stderr": 0.02890159361241178 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.6944444444444444, "acc_stderr": 0.03852084696008534, "acc_norm": 0.6944444444444444, "acc_norm_stderr": 0.03852084696008534 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.4, "acc_stderr": 0.04923659639173309, "acc_norm": 0.4, "acc_norm_stderr": 0.04923659639173309 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.53, "acc_stderr": 0.050161355804659205, "acc_norm": 0.53, "acc_norm_stderr": 0.050161355804659205 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.39, "acc_stderr": 0.04902071300001974, "acc_norm": 0.39, "acc_norm_stderr": 0.04902071300001974 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.5895953757225434, "acc_stderr": 0.037507570448955356, "acc_norm": 0.5895953757225434, "acc_norm_stderr": 0.037507570448955356 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.4215686274509804, "acc_stderr": 0.04913595201274498, "acc_norm": 0.4215686274509804, "acc_norm_stderr": 0.04913595201274498 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.69, "acc_stderr": 0.04648231987117316, "acc_norm": 0.69, "acc_norm_stderr": 0.04648231987117316 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.5319148936170213, "acc_stderr": 0.03261936918467382, "acc_norm": 0.5319148936170213, "acc_norm_stderr": 0.03261936918467382 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.40350877192982454, "acc_stderr": 0.04615186962583703, "acc_norm": 0.40350877192982454, "acc_norm_stderr": 0.04615186962583703 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.6137931034482759, "acc_stderr": 0.04057324734419035, "acc_norm": 0.6137931034482759, "acc_norm_stderr": 0.04057324734419035 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.36772486772486773, "acc_stderr": 0.024833839825562413, "acc_norm": 0.36772486772486773, "acc_norm_stderr": 0.024833839825562413 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.42063492063492064, "acc_stderr": 0.04415438226743744, "acc_norm": 0.42063492063492064, "acc_norm_stderr": 0.04415438226743744 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.35, "acc_stderr": 0.0479372485441102, "acc_norm": 0.35, "acc_norm_stderr": 0.0479372485441102 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.632258064516129, "acc_stderr": 0.02743086657997347, "acc_norm": 0.632258064516129, "acc_norm_stderr": 0.02743086657997347 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.5123152709359606, "acc_stderr": 0.035169204442208966, "acc_norm": 0.5123152709359606, "acc_norm_stderr": 0.035169204442208966 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.65, "acc_stderr": 0.047937248544110196, "acc_norm": 0.65, "acc_norm_stderr": 0.047937248544110196 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.7393939393939394, "acc_stderr": 0.034277431758165236, "acc_norm": 0.7393939393939394, "acc_norm_stderr": 0.034277431758165236 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.7626262626262627, "acc_stderr": 0.030313710538198896, "acc_norm": 0.7626262626262627, "acc_norm_stderr": 0.030313710538198896 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.8549222797927462, "acc_stderr": 0.025416343096306443, "acc_norm": 0.8549222797927462, "acc_norm_stderr": 0.025416343096306443 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.5564102564102564, "acc_stderr": 0.025189149894764205, "acc_norm": 0.5564102564102564, "acc_norm_stderr": 0.025189149894764205 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.3, "acc_stderr": 0.027940457136228395, "acc_norm": 0.3, "acc_norm_stderr": 0.027940457136228395 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.6554621848739496, "acc_stderr": 0.030868682604121626, "acc_norm": 0.6554621848739496, "acc_norm_stderr": 0.030868682604121626 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.3576158940397351, "acc_stderr": 0.03913453431177258, "acc_norm": 0.3576158940397351, "acc_norm_stderr": 0.03913453431177258 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.7908256880733945, "acc_stderr": 0.017437937173343233, "acc_norm": 0.7908256880733945, "acc_norm_stderr": 0.017437937173343233 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.4444444444444444, "acc_stderr": 0.03388857118502326, "acc_norm": 0.4444444444444444, "acc_norm_stderr": 0.03388857118502326 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.7647058823529411, "acc_stderr": 0.029771775228145624, "acc_norm": 0.7647058823529411, "acc_norm_stderr": 0.029771775228145624 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.7552742616033755, "acc_stderr": 0.027985699387036423, "acc_norm": 0.7552742616033755, "acc_norm_stderr": 0.027985699387036423 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.6188340807174888, "acc_stderr": 0.03259625118416827, "acc_norm": 0.6188340807174888, "acc_norm_stderr": 0.03259625118416827 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.7404580152671756, "acc_stderr": 0.03844876139785271, "acc_norm": 0.7404580152671756, "acc_norm_stderr": 0.03844876139785271 }, "harness|hendrycksTest-international_law|5": { "acc": 0.8099173553719008, "acc_stderr": 0.03581796951709282, "acc_norm": 0.8099173553719008, "acc_norm_stderr": 0.03581796951709282 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.7407407407407407, "acc_stderr": 0.042365112580946336, "acc_norm": 0.7407407407407407, "acc_norm_stderr": 0.042365112580946336 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.7300613496932515, "acc_stderr": 0.034878251684978906, "acc_norm": 0.7300613496932515, "acc_norm_stderr": 0.034878251684978906 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.44642857142857145, "acc_stderr": 0.047184714852195886, "acc_norm": 0.44642857142857145, "acc_norm_stderr": 0.047184714852195886 }, "harness|hendrycksTest-management|5": { "acc": 0.7572815533980582, "acc_stderr": 0.04245022486384495, "acc_norm": 0.7572815533980582, "acc_norm_stderr": 0.04245022486384495 }, "harness|hendrycksTest-marketing|5": { "acc": 0.8632478632478633, "acc_stderr": 0.022509033937077785, "acc_norm": 0.8632478632478633, "acc_norm_stderr": 0.022509033937077785 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.67, "acc_stderr": 0.04725815626252609, "acc_norm": 0.67, "acc_norm_stderr": 0.04725815626252609 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.7803320561941252, "acc_stderr": 0.014805384478371153, "acc_norm": 0.7803320561941252, "acc_norm_stderr": 0.014805384478371153 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.6965317919075145, "acc_stderr": 0.024752411960917205, "acc_norm": 0.6965317919075145, "acc_norm_stderr": 0.024752411960917205 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.311731843575419, "acc_stderr": 0.015491756531894637, "acc_norm": 0.311731843575419, "acc_norm_stderr": 0.015491756531894637 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.6862745098039216, "acc_stderr": 0.026568921015457138, "acc_norm": 0.6862745098039216, "acc_norm_stderr": 0.026568921015457138 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.7041800643086816, "acc_stderr": 0.025922371788818777, "acc_norm": 0.7041800643086816, "acc_norm_stderr": 0.025922371788818777 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.7006172839506173, "acc_stderr": 0.02548311560119546, "acc_norm": 0.7006172839506173, "acc_norm_stderr": 0.02548311560119546 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.450354609929078, "acc_stderr": 0.029680105565029036, "acc_norm": 0.450354609929078, "acc_norm_stderr": 0.029680105565029036 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.4348109517601043, "acc_stderr": 0.012661233805616302, "acc_norm": 0.4348109517601043, "acc_norm_stderr": 0.012661233805616302 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.6176470588235294, "acc_stderr": 0.02952009569768776, "acc_norm": 0.6176470588235294, "acc_norm_stderr": 0.02952009569768776 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.630718954248366, "acc_stderr": 0.019524316744866353, "acc_norm": 0.630718954248366, "acc_norm_stderr": 0.019524316744866353 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.7090909090909091, "acc_stderr": 0.04350271442923243, "acc_norm": 0.7090909090909091, "acc_norm_stderr": 0.04350271442923243 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.7061224489795919, "acc_stderr": 0.02916273841024977, "acc_norm": 0.7061224489795919, "acc_norm_stderr": 0.02916273841024977 }, "harness|hendrycksTest-sociology|5": { "acc": 0.7313432835820896, "acc_stderr": 0.03134328358208954, "acc_norm": 0.7313432835820896, "acc_norm_stderr": 0.03134328358208954 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.81, "acc_stderr": 0.03942772444036625, "acc_norm": 0.81, "acc_norm_stderr": 0.03942772444036625 }, "harness|hendrycksTest-virology|5": { "acc": 0.4939759036144578, "acc_stderr": 0.03892212195333047, "acc_norm": 0.4939759036144578, "acc_norm_stderr": 0.03892212195333047 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.8362573099415205, "acc_stderr": 0.028380919596145866, "acc_norm": 0.8362573099415205, "acc_norm_stderr": 0.028380919596145866 }, "harness|truthfulqa:mc|0": { "mc1": 0.5275397796817626, "mc1_stderr": 0.01747693019071219, "mc2": 0.6825629969752945, "mc2_stderr": 0.015176655501749976 }, "harness|winogrande|5": { "acc": 0.7719021310181531, "acc_stderr": 0.011793015817663597 }, "harness|gsm8k|5": { "acc": 0.400303260045489, "acc_stderr": 0.013495926436566438 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
open-llm-leaderboard/details_mistralai__Mistral-7B-Instruct-v0.2
[ "region:us" ]
2023-12-12T03:40:42+00:00
{"pretty_name": "Evaluation run of mistralai/Mistral-7B-Instruct-v0.2", "dataset_summary": "Dataset automatically created during the evaluation run of model [mistralai/Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_mistralai__Mistral-7B-Instruct-v0.2\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-12T03:37:50.599841](https://huggingface.co/datasets/open-llm-leaderboard/details_mistralai__Mistral-7B-Instruct-v0.2/blob/main/results_2023-12-12T03-37-50.599841.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.6077550413417533,\n \"acc_stderr\": 0.03310328786623656,\n \"acc_norm\": 0.6122661125091963,\n \"acc_norm_stderr\": 0.03377303167526721,\n \"mc1\": 0.5275397796817626,\n \"mc1_stderr\": 0.01747693019071219,\n \"mc2\": 0.6825629969752945,\n \"mc2_stderr\": 0.015176655501749976\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.5895904436860068,\n \"acc_stderr\": 0.014374922192642664,\n \"acc_norm\": 0.6313993174061433,\n \"acc_norm_stderr\": 0.014097810678042203\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6677952599083847,\n \"acc_stderr\": 0.0047004138249425636,\n \"acc_norm\": 0.8488348934475204,\n \"acc_norm_stderr\": 0.003574776594108505\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.32,\n \"acc_stderr\": 0.046882617226215034,\n \"acc_norm\": 0.32,\n \"acc_norm_stderr\": 0.046882617226215034\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.5703703703703704,\n \"acc_stderr\": 0.042763494943765995,\n \"acc_norm\": 0.5703703703703704,\n \"acc_norm_stderr\": 0.042763494943765995\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.625,\n \"acc_stderr\": 0.039397364351956274,\n \"acc_norm\": 0.625,\n \"acc_norm_stderr\": 0.039397364351956274\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.6,\n \"acc_stderr\": 0.04923659639173309,\n \"acc_norm\": 0.6,\n \"acc_norm_stderr\": 0.04923659639173309\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.6716981132075471,\n \"acc_stderr\": 0.02890159361241178,\n \"acc_norm\": 0.6716981132075471,\n \"acc_norm_stderr\": 0.02890159361241178\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.6944444444444444,\n \"acc_stderr\": 0.03852084696008534,\n \"acc_norm\": 0.6944444444444444,\n \"acc_norm_stderr\": 0.03852084696008534\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.4,\n \"acc_stderr\": 0.04923659639173309,\n \"acc_norm\": 0.4,\n \"acc_norm_stderr\": 0.04923659639173309\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.53,\n \"acc_stderr\": 0.050161355804659205,\n \"acc_norm\": 0.53,\n \"acc_norm_stderr\": 0.050161355804659205\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.39,\n \"acc_stderr\": 0.04902071300001974,\n \"acc_norm\": 0.39,\n \"acc_norm_stderr\": 0.04902071300001974\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.5895953757225434,\n \"acc_stderr\": 0.037507570448955356,\n \"acc_norm\": 0.5895953757225434,\n \"acc_norm_stderr\": 0.037507570448955356\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.4215686274509804,\n \"acc_stderr\": 0.04913595201274498,\n \"acc_norm\": 0.4215686274509804,\n \"acc_norm_stderr\": 0.04913595201274498\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.69,\n \"acc_stderr\": 0.04648231987117316,\n \"acc_norm\": 0.69,\n \"acc_norm_stderr\": 0.04648231987117316\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.5319148936170213,\n \"acc_stderr\": 0.03261936918467382,\n \"acc_norm\": 0.5319148936170213,\n \"acc_norm_stderr\": 0.03261936918467382\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.40350877192982454,\n \"acc_stderr\": 0.04615186962583703,\n \"acc_norm\": 0.40350877192982454,\n \"acc_norm_stderr\": 0.04615186962583703\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.6137931034482759,\n \"acc_stderr\": 0.04057324734419035,\n \"acc_norm\": 0.6137931034482759,\n \"acc_norm_stderr\": 0.04057324734419035\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.36772486772486773,\n \"acc_stderr\": 0.024833839825562413,\n \"acc_norm\": 0.36772486772486773,\n \"acc_norm_stderr\": 0.024833839825562413\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.42063492063492064,\n \"acc_stderr\": 0.04415438226743744,\n \"acc_norm\": 0.42063492063492064,\n \"acc_norm_stderr\": 0.04415438226743744\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.35,\n \"acc_stderr\": 0.0479372485441102,\n \"acc_norm\": 0.35,\n \"acc_norm_stderr\": 0.0479372485441102\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.632258064516129,\n \"acc_stderr\": 0.02743086657997347,\n \"acc_norm\": 0.632258064516129,\n \"acc_norm_stderr\": 0.02743086657997347\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.5123152709359606,\n \"acc_stderr\": 0.035169204442208966,\n \"acc_norm\": 0.5123152709359606,\n \"acc_norm_stderr\": 0.035169204442208966\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.65,\n \"acc_stderr\": 0.047937248544110196,\n \"acc_norm\": 0.65,\n \"acc_norm_stderr\": 0.047937248544110196\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.7393939393939394,\n \"acc_stderr\": 0.034277431758165236,\n \"acc_norm\": 0.7393939393939394,\n \"acc_norm_stderr\": 0.034277431758165236\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.7626262626262627,\n \"acc_stderr\": 0.030313710538198896,\n \"acc_norm\": 0.7626262626262627,\n \"acc_norm_stderr\": 0.030313710538198896\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.8549222797927462,\n \"acc_stderr\": 0.025416343096306443,\n \"acc_norm\": 0.8549222797927462,\n \"acc_norm_stderr\": 0.025416343096306443\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.5564102564102564,\n \"acc_stderr\": 0.025189149894764205,\n \"acc_norm\": 0.5564102564102564,\n \"acc_norm_stderr\": 0.025189149894764205\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.3,\n \"acc_stderr\": 0.027940457136228395,\n \"acc_norm\": 0.3,\n \"acc_norm_stderr\": 0.027940457136228395\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.6554621848739496,\n \"acc_stderr\": 0.030868682604121626,\n \"acc_norm\": 0.6554621848739496,\n \"acc_norm_stderr\": 0.030868682604121626\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.3576158940397351,\n \"acc_stderr\": 0.03913453431177258,\n \"acc_norm\": 0.3576158940397351,\n \"acc_norm_stderr\": 0.03913453431177258\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.7908256880733945,\n \"acc_stderr\": 0.017437937173343233,\n \"acc_norm\": 0.7908256880733945,\n \"acc_norm_stderr\": 0.017437937173343233\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.4444444444444444,\n \"acc_stderr\": 0.03388857118502326,\n \"acc_norm\": 0.4444444444444444,\n \"acc_norm_stderr\": 0.03388857118502326\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.7647058823529411,\n \"acc_stderr\": 0.029771775228145624,\n \"acc_norm\": 0.7647058823529411,\n \"acc_norm_stderr\": 0.029771775228145624\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.7552742616033755,\n \"acc_stderr\": 0.027985699387036423,\n \"acc_norm\": 0.7552742616033755,\n \"acc_norm_stderr\": 0.027985699387036423\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.6188340807174888,\n \"acc_stderr\": 0.03259625118416827,\n \"acc_norm\": 0.6188340807174888,\n \"acc_norm_stderr\": 0.03259625118416827\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.7404580152671756,\n \"acc_stderr\": 0.03844876139785271,\n \"acc_norm\": 0.7404580152671756,\n \"acc_norm_stderr\": 0.03844876139785271\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.8099173553719008,\n \"acc_stderr\": 0.03581796951709282,\n \"acc_norm\": 0.8099173553719008,\n \"acc_norm_stderr\": 0.03581796951709282\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.7407407407407407,\n \"acc_stderr\": 0.042365112580946336,\n \"acc_norm\": 0.7407407407407407,\n \"acc_norm_stderr\": 0.042365112580946336\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.7300613496932515,\n \"acc_stderr\": 0.034878251684978906,\n \"acc_norm\": 0.7300613496932515,\n \"acc_norm_stderr\": 0.034878251684978906\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.44642857142857145,\n \"acc_stderr\": 0.047184714852195886,\n \"acc_norm\": 0.44642857142857145,\n \"acc_norm_stderr\": 0.047184714852195886\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.7572815533980582,\n \"acc_stderr\": 0.04245022486384495,\n \"acc_norm\": 0.7572815533980582,\n \"acc_norm_stderr\": 0.04245022486384495\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.8632478632478633,\n \"acc_stderr\": 0.022509033937077785,\n \"acc_norm\": 0.8632478632478633,\n \"acc_norm_stderr\": 0.022509033937077785\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.67,\n \"acc_stderr\": 0.04725815626252609,\n \"acc_norm\": 0.67,\n \"acc_norm_stderr\": 0.04725815626252609\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.7803320561941252,\n \"acc_stderr\": 0.014805384478371153,\n \"acc_norm\": 0.7803320561941252,\n \"acc_norm_stderr\": 0.014805384478371153\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.6965317919075145,\n \"acc_stderr\": 0.024752411960917205,\n \"acc_norm\": 0.6965317919075145,\n \"acc_norm_stderr\": 0.024752411960917205\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.311731843575419,\n \"acc_stderr\": 0.015491756531894637,\n \"acc_norm\": 0.311731843575419,\n \"acc_norm_stderr\": 0.015491756531894637\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.6862745098039216,\n \"acc_stderr\": 0.026568921015457138,\n \"acc_norm\": 0.6862745098039216,\n \"acc_norm_stderr\": 0.026568921015457138\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.7041800643086816,\n \"acc_stderr\": 0.025922371788818777,\n \"acc_norm\": 0.7041800643086816,\n \"acc_norm_stderr\": 0.025922371788818777\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.7006172839506173,\n \"acc_stderr\": 0.02548311560119546,\n \"acc_norm\": 0.7006172839506173,\n \"acc_norm_stderr\": 0.02548311560119546\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.450354609929078,\n \"acc_stderr\": 0.029680105565029036,\n \"acc_norm\": 0.450354609929078,\n \"acc_norm_stderr\": 0.029680105565029036\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.4348109517601043,\n \"acc_stderr\": 0.012661233805616302,\n \"acc_norm\": 0.4348109517601043,\n \"acc_norm_stderr\": 0.012661233805616302\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.6176470588235294,\n \"acc_stderr\": 0.02952009569768776,\n \"acc_norm\": 0.6176470588235294,\n \"acc_norm_stderr\": 0.02952009569768776\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.630718954248366,\n \"acc_stderr\": 0.019524316744866353,\n \"acc_norm\": 0.630718954248366,\n \"acc_norm_stderr\": 0.019524316744866353\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.7090909090909091,\n \"acc_stderr\": 0.04350271442923243,\n \"acc_norm\": 0.7090909090909091,\n \"acc_norm_stderr\": 0.04350271442923243\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.7061224489795919,\n \"acc_stderr\": 0.02916273841024977,\n \"acc_norm\": 0.7061224489795919,\n \"acc_norm_stderr\": 0.02916273841024977\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.7313432835820896,\n \"acc_stderr\": 0.03134328358208954,\n \"acc_norm\": 0.7313432835820896,\n \"acc_norm_stderr\": 0.03134328358208954\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.81,\n \"acc_stderr\": 0.03942772444036625,\n \"acc_norm\": 0.81,\n \"acc_norm_stderr\": 0.03942772444036625\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.4939759036144578,\n \"acc_stderr\": 0.03892212195333047,\n \"acc_norm\": 0.4939759036144578,\n \"acc_norm_stderr\": 0.03892212195333047\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.8362573099415205,\n \"acc_stderr\": 0.028380919596145866,\n \"acc_norm\": 0.8362573099415205,\n \"acc_norm_stderr\": 0.028380919596145866\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.5275397796817626,\n \"mc1_stderr\": 0.01747693019071219,\n \"mc2\": 0.6825629969752945,\n \"mc2_stderr\": 0.015176655501749976\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7719021310181531,\n \"acc_stderr\": 0.011793015817663597\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.400303260045489,\n \"acc_stderr\": 0.013495926436566438\n }\n}\n```", "repo_url": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|arc:challenge|25_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|gsm8k|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hellaswag|10_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T03-37-50.599841.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["**/details_harness|winogrande|5_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-12T03-37-50.599841.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_12T03_37_50.599841", "path": ["results_2023-12-12T03-37-50.599841.parquet"]}, {"split": "latest", "path": ["results_2023-12-12T03-37-50.599841.parquet"]}]}]}
2023-12-12T03:41:32+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of mistralai/Mistral-7B-Instruct-v0.2 Dataset automatically created during the evaluation run of model mistralai/Mistral-7B-Instruct-v0.2 on the Open LLM Leaderboard. The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-12T03:37:50.599841(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Evaluation run of mistralai/Mistral-7B-Instruct-v0.2\n\n\n\nDataset automatically created during the evaluation run of model mistralai/Mistral-7B-Instruct-v0.2 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T03:37:50.599841(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of mistralai/Mistral-7B-Instruct-v0.2\n\n\n\nDataset automatically created during the evaluation run of model mistralai/Mistral-7B-Instruct-v0.2 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T03:37:50.599841(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 189, 67, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of mistralai/Mistral-7B-Instruct-v0.2\n\n\n\nDataset automatically created during the evaluation run of model mistralai/Mistral-7B-Instruct-v0.2 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-12T03:37:50.599841(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
5f0ca37494d3ac5b79c0e6d35d69c9774c945b67
# Dataset Card for Evaluation run of wang7776/Llama-2-7b-chat-hf-30-sparsity <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [wang7776/Llama-2-7b-chat-hf-30-sparsity](https://huggingface.co/wang7776/Llama-2-7b-chat-hf-30-sparsity) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_wang7776__Llama-2-7b-chat-hf-30-sparsity", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-12T03:37:48.728667](https://huggingface.co/datasets/open-llm-leaderboard/details_wang7776__Llama-2-7b-chat-hf-30-sparsity/blob/main/results_2023-12-12T03-37-48.728667.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.45732892704529104, "acc_stderr": 0.03428010617513505, "acc_norm": 0.4621141078743541, "acc_norm_stderr": 0.035035452136234484, "mc1": 0.2913096695226438, "mc1_stderr": 0.015905987048184828, "mc2": 0.4482303614832581, "mc2_stderr": 0.01566475317876804 }, "harness|arc:challenge|25": { "acc": 0.4786689419795222, "acc_stderr": 0.014598087973127108, "acc_norm": 0.5247440273037542, "acc_norm_stderr": 0.014593487694937742 }, "harness|hellaswag|10": { "acc": 0.5774746066520613, "acc_stderr": 0.004929517011508224, "acc_norm": 0.7657837084246166, "acc_norm_stderr": 0.0042264246245106935 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.3, "acc_stderr": 0.04605661864718381, "acc_norm": 0.3, "acc_norm_stderr": 0.04605661864718381 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.4074074074074074, "acc_stderr": 0.042446332383532286, "acc_norm": 0.4074074074074074, "acc_norm_stderr": 0.042446332383532286 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.48026315789473684, "acc_stderr": 0.040657710025626036, "acc_norm": 0.48026315789473684, "acc_norm_stderr": 0.040657710025626036 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.43, "acc_stderr": 0.049756985195624284, "acc_norm": 0.43, "acc_norm_stderr": 0.049756985195624284 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.4830188679245283, "acc_stderr": 0.030755120364119898, "acc_norm": 0.4830188679245283, "acc_norm_stderr": 0.030755120364119898 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.5069444444444444, "acc_stderr": 0.04180806750294938, "acc_norm": 0.5069444444444444, "acc_norm_stderr": 0.04180806750294938 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.29, "acc_stderr": 0.045604802157206845, "acc_norm": 0.29, "acc_norm_stderr": 0.045604802157206845 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.37, "acc_stderr": 0.04852365870939099, "acc_norm": 0.37, "acc_norm_stderr": 0.04852365870939099 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.32, "acc_stderr": 0.04688261722621505, "acc_norm": 0.32, "acc_norm_stderr": 0.04688261722621505 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.3699421965317919, "acc_stderr": 0.036812296333943194, "acc_norm": 0.3699421965317919, "acc_norm_stderr": 0.036812296333943194 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.19607843137254902, "acc_stderr": 0.039505818611799616, "acc_norm": 0.19607843137254902, "acc_norm_stderr": 0.039505818611799616 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.52, "acc_stderr": 0.050211673156867795, "acc_norm": 0.52, "acc_norm_stderr": 0.050211673156867795 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.37872340425531914, "acc_stderr": 0.03170995606040655, "acc_norm": 0.37872340425531914, "acc_norm_stderr": 0.03170995606040655 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.32456140350877194, "acc_stderr": 0.044045561573747664, "acc_norm": 0.32456140350877194, "acc_norm_stderr": 0.044045561573747664 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.45517241379310347, "acc_stderr": 0.04149886942192117, "acc_norm": 0.45517241379310347, "acc_norm_stderr": 0.04149886942192117 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.25925925925925924, "acc_stderr": 0.022569897074918424, "acc_norm": 0.25925925925925924, "acc_norm_stderr": 0.022569897074918424 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.23015873015873015, "acc_stderr": 0.03764950879790604, "acc_norm": 0.23015873015873015, "acc_norm_stderr": 0.03764950879790604 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.34, "acc_stderr": 0.04760952285695235, "acc_norm": 0.34, "acc_norm_stderr": 0.04760952285695235 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.5225806451612903, "acc_stderr": 0.02841498501970786, "acc_norm": 0.5225806451612903, "acc_norm_stderr": 0.02841498501970786 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.3399014778325123, "acc_stderr": 0.033327690684107895, "acc_norm": 0.3399014778325123, "acc_norm_stderr": 0.033327690684107895 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.41, "acc_stderr": 0.04943110704237102, "acc_norm": 0.41, "acc_norm_stderr": 0.04943110704237102 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.593939393939394, "acc_stderr": 0.03834816355401181, "acc_norm": 0.593939393939394, "acc_norm_stderr": 0.03834816355401181 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.5959595959595959, "acc_stderr": 0.03496130972056129, "acc_norm": 0.5959595959595959, "acc_norm_stderr": 0.03496130972056129 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.6683937823834197, "acc_stderr": 0.03397636541089118, "acc_norm": 0.6683937823834197, "acc_norm_stderr": 0.03397636541089118 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.38461538461538464, "acc_stderr": 0.024666744915187222, "acc_norm": 0.38461538461538464, "acc_norm_stderr": 0.024666744915187222 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.25925925925925924, "acc_stderr": 0.026719240783712177, "acc_norm": 0.25925925925925924, "acc_norm_stderr": 0.026719240783712177 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.4117647058823529, "acc_stderr": 0.03196876989195778, "acc_norm": 0.4117647058823529, "acc_norm_stderr": 0.03196876989195778 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.2913907284768212, "acc_stderr": 0.03710185726119995, "acc_norm": 0.2913907284768212, "acc_norm_stderr": 0.03710185726119995 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.6440366972477064, "acc_stderr": 0.020528559278244214, "acc_norm": 0.6440366972477064, "acc_norm_stderr": 0.020528559278244214 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.2638888888888889, "acc_stderr": 0.030058202704309846, "acc_norm": 0.2638888888888889, "acc_norm_stderr": 0.030058202704309846 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.6372549019607843, "acc_stderr": 0.03374499356319355, "acc_norm": 0.6372549019607843, "acc_norm_stderr": 0.03374499356319355 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.6160337552742616, "acc_stderr": 0.031658678064106674, "acc_norm": 0.6160337552742616, "acc_norm_stderr": 0.031658678064106674 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.5426008968609866, "acc_stderr": 0.03343577705583065, "acc_norm": 0.5426008968609866, "acc_norm_stderr": 0.03343577705583065 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.48091603053435117, "acc_stderr": 0.04382094705550989, "acc_norm": 0.48091603053435117, "acc_norm_stderr": 0.04382094705550989 }, "harness|hendrycksTest-international_law|5": { "acc": 0.628099173553719, "acc_stderr": 0.04412015806624504, "acc_norm": 0.628099173553719, "acc_norm_stderr": 0.04412015806624504 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.5462962962962963, "acc_stderr": 0.04812917324536823, "acc_norm": 0.5462962962962963, "acc_norm_stderr": 0.04812917324536823 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.49079754601226994, "acc_stderr": 0.03927705600787443, "acc_norm": 0.49079754601226994, "acc_norm_stderr": 0.03927705600787443 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.3482142857142857, "acc_stderr": 0.04521829902833586, "acc_norm": 0.3482142857142857, "acc_norm_stderr": 0.04521829902833586 }, "harness|hendrycksTest-management|5": { "acc": 0.6407766990291263, "acc_stderr": 0.047504583990416946, "acc_norm": 0.6407766990291263, "acc_norm_stderr": 0.047504583990416946 }, "harness|hendrycksTest-marketing|5": { "acc": 0.6965811965811965, "acc_stderr": 0.030118210106942638, "acc_norm": 0.6965811965811965, "acc_norm_stderr": 0.030118210106942638 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.48, "acc_stderr": 0.050211673156867795, "acc_norm": 0.48, "acc_norm_stderr": 0.050211673156867795 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.6360153256704981, "acc_stderr": 0.017205684809032232, "acc_norm": 0.6360153256704981, "acc_norm_stderr": 0.017205684809032232 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.4797687861271676, "acc_stderr": 0.026897049996382868, "acc_norm": 0.4797687861271676, "acc_norm_stderr": 0.026897049996382868 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.2446927374301676, "acc_stderr": 0.014378169884098405, "acc_norm": 0.2446927374301676, "acc_norm_stderr": 0.014378169884098405 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.4738562091503268, "acc_stderr": 0.028590752958852394, "acc_norm": 0.4738562091503268, "acc_norm_stderr": 0.028590752958852394 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.5466237942122186, "acc_stderr": 0.02827435985489425, "acc_norm": 0.5466237942122186, "acc_norm_stderr": 0.02827435985489425 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.5308641975308642, "acc_stderr": 0.02776768960683392, "acc_norm": 0.5308641975308642, "acc_norm_stderr": 0.02776768960683392 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.35106382978723405, "acc_stderr": 0.02847350127296376, "acc_norm": 0.35106382978723405, "acc_norm_stderr": 0.02847350127296376 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.34159061277705344, "acc_stderr": 0.012112391320842854, "acc_norm": 0.34159061277705344, "acc_norm_stderr": 0.012112391320842854 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.3713235294117647, "acc_stderr": 0.02934980313976587, "acc_norm": 0.3713235294117647, "acc_norm_stderr": 0.02934980313976587 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.43790849673202614, "acc_stderr": 0.020071257886886525, "acc_norm": 0.43790849673202614, "acc_norm_stderr": 0.020071257886886525 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.4727272727272727, "acc_stderr": 0.04782001791380063, "acc_norm": 0.4727272727272727, "acc_norm_stderr": 0.04782001791380063 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.46530612244897956, "acc_stderr": 0.031932070244253145, "acc_norm": 0.46530612244897956, "acc_norm_stderr": 0.031932070244253145 }, "harness|hendrycksTest-sociology|5": { "acc": 0.6417910447761194, "acc_stderr": 0.03390393042268814, "acc_norm": 0.6417910447761194, "acc_norm_stderr": 0.03390393042268814 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.69, "acc_stderr": 0.04648231987117316, "acc_norm": 0.69, "acc_norm_stderr": 0.04648231987117316 }, "harness|hendrycksTest-virology|5": { "acc": 0.4457831325301205, "acc_stderr": 0.03869543323472101, "acc_norm": 0.4457831325301205, "acc_norm_stderr": 0.03869543323472101 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.6900584795321637, "acc_stderr": 0.035469769593931624, "acc_norm": 0.6900584795321637, "acc_norm_stderr": 0.035469769593931624 }, "harness|truthfulqa:mc|0": { "mc1": 0.2913096695226438, "mc1_stderr": 0.015905987048184828, "mc2": 0.4482303614832581, "mc2_stderr": 0.01566475317876804 }, "harness|winogrande|5": { "acc": 0.6961325966850829, "acc_stderr": 0.01292620947548357 }, "harness|gsm8k|5": { "acc": 0.17058377558756635, "acc_stderr": 0.010360898504733311 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
open-llm-leaderboard/details_wang7776__Llama-2-7b-chat-hf-30-sparsity
[ "region:us" ]
2023-12-12T03:40:53+00:00
{"pretty_name": "Evaluation run of wang7776/Llama-2-7b-chat-hf-30-sparsity", "dataset_summary": "Dataset automatically created during the evaluation run of model [wang7776/Llama-2-7b-chat-hf-30-sparsity](https://huggingface.co/wang7776/Llama-2-7b-chat-hf-30-sparsity) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_wang7776__Llama-2-7b-chat-hf-30-sparsity\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-12T03:37:48.728667](https://huggingface.co/datasets/open-llm-leaderboard/details_wang7776__Llama-2-7b-chat-hf-30-sparsity/blob/main/results_2023-12-12T03-37-48.728667.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.45732892704529104,\n \"acc_stderr\": 0.03428010617513505,\n \"acc_norm\": 0.4621141078743541,\n \"acc_norm_stderr\": 0.035035452136234484,\n \"mc1\": 0.2913096695226438,\n \"mc1_stderr\": 0.015905987048184828,\n \"mc2\": 0.4482303614832581,\n \"mc2_stderr\": 0.01566475317876804\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.4786689419795222,\n \"acc_stderr\": 0.014598087973127108,\n \"acc_norm\": 0.5247440273037542,\n \"acc_norm_stderr\": 0.014593487694937742\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.5774746066520613,\n \"acc_stderr\": 0.004929517011508224,\n \"acc_norm\": 0.7657837084246166,\n \"acc_norm_stderr\": 0.0042264246245106935\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.3,\n \"acc_stderr\": 0.04605661864718381,\n \"acc_norm\": 0.3,\n \"acc_norm_stderr\": 0.04605661864718381\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.4074074074074074,\n \"acc_stderr\": 0.042446332383532286,\n \"acc_norm\": 0.4074074074074074,\n \"acc_norm_stderr\": 0.042446332383532286\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.48026315789473684,\n \"acc_stderr\": 0.040657710025626036,\n \"acc_norm\": 0.48026315789473684,\n \"acc_norm_stderr\": 0.040657710025626036\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.43,\n \"acc_stderr\": 0.049756985195624284,\n \"acc_norm\": 0.43,\n \"acc_norm_stderr\": 0.049756985195624284\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.4830188679245283,\n \"acc_stderr\": 0.030755120364119898,\n \"acc_norm\": 0.4830188679245283,\n \"acc_norm_stderr\": 0.030755120364119898\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.5069444444444444,\n \"acc_stderr\": 0.04180806750294938,\n \"acc_norm\": 0.5069444444444444,\n \"acc_norm_stderr\": 0.04180806750294938\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.29,\n \"acc_stderr\": 0.045604802157206845,\n \"acc_norm\": 0.29,\n \"acc_norm_stderr\": 0.045604802157206845\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.37,\n \"acc_stderr\": 0.04852365870939099,\n \"acc_norm\": 0.37,\n \"acc_norm_stderr\": 0.04852365870939099\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.32,\n \"acc_stderr\": 0.04688261722621505,\n \"acc_norm\": 0.32,\n \"acc_norm_stderr\": 0.04688261722621505\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.3699421965317919,\n \"acc_stderr\": 0.036812296333943194,\n \"acc_norm\": 0.3699421965317919,\n \"acc_norm_stderr\": 0.036812296333943194\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.19607843137254902,\n \"acc_stderr\": 0.039505818611799616,\n \"acc_norm\": 0.19607843137254902,\n \"acc_norm_stderr\": 0.039505818611799616\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.52,\n \"acc_stderr\": 0.050211673156867795,\n \"acc_norm\": 0.52,\n \"acc_norm_stderr\": 0.050211673156867795\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.37872340425531914,\n \"acc_stderr\": 0.03170995606040655,\n \"acc_norm\": 0.37872340425531914,\n \"acc_norm_stderr\": 0.03170995606040655\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.32456140350877194,\n \"acc_stderr\": 0.044045561573747664,\n \"acc_norm\": 0.32456140350877194,\n \"acc_norm_stderr\": 0.044045561573747664\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.45517241379310347,\n \"acc_stderr\": 0.04149886942192117,\n \"acc_norm\": 0.45517241379310347,\n \"acc_norm_stderr\": 0.04149886942192117\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.25925925925925924,\n \"acc_stderr\": 0.022569897074918424,\n \"acc_norm\": 0.25925925925925924,\n \"acc_norm_stderr\": 0.022569897074918424\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.23015873015873015,\n \"acc_stderr\": 0.03764950879790604,\n \"acc_norm\": 0.23015873015873015,\n \"acc_norm_stderr\": 0.03764950879790604\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.34,\n \"acc_stderr\": 0.04760952285695235,\n \"acc_norm\": 0.34,\n \"acc_norm_stderr\": 0.04760952285695235\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.5225806451612903,\n \"acc_stderr\": 0.02841498501970786,\n \"acc_norm\": 0.5225806451612903,\n \"acc_norm_stderr\": 0.02841498501970786\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.3399014778325123,\n \"acc_stderr\": 0.033327690684107895,\n \"acc_norm\": 0.3399014778325123,\n \"acc_norm_stderr\": 0.033327690684107895\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.41,\n \"acc_stderr\": 0.04943110704237102,\n \"acc_norm\": 0.41,\n \"acc_norm_stderr\": 0.04943110704237102\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.593939393939394,\n \"acc_stderr\": 0.03834816355401181,\n \"acc_norm\": 0.593939393939394,\n \"acc_norm_stderr\": 0.03834816355401181\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.5959595959595959,\n \"acc_stderr\": 0.03496130972056129,\n \"acc_norm\": 0.5959595959595959,\n \"acc_norm_stderr\": 0.03496130972056129\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.6683937823834197,\n \"acc_stderr\": 0.03397636541089118,\n \"acc_norm\": 0.6683937823834197,\n \"acc_norm_stderr\": 0.03397636541089118\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.38461538461538464,\n \"acc_stderr\": 0.024666744915187222,\n \"acc_norm\": 0.38461538461538464,\n \"acc_norm_stderr\": 0.024666744915187222\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.25925925925925924,\n \"acc_stderr\": 0.026719240783712177,\n \"acc_norm\": 0.25925925925925924,\n \"acc_norm_stderr\": 0.026719240783712177\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.4117647058823529,\n \"acc_stderr\": 0.03196876989195778,\n \"acc_norm\": 0.4117647058823529,\n \"acc_norm_stderr\": 0.03196876989195778\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.2913907284768212,\n \"acc_stderr\": 0.03710185726119995,\n \"acc_norm\": 0.2913907284768212,\n \"acc_norm_stderr\": 0.03710185726119995\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.6440366972477064,\n \"acc_stderr\": 0.020528559278244214,\n \"acc_norm\": 0.6440366972477064,\n \"acc_norm_stderr\": 0.020528559278244214\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.2638888888888889,\n \"acc_stderr\": 0.030058202704309846,\n \"acc_norm\": 0.2638888888888889,\n \"acc_norm_stderr\": 0.030058202704309846\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.6372549019607843,\n \"acc_stderr\": 0.03374499356319355,\n \"acc_norm\": 0.6372549019607843,\n \"acc_norm_stderr\": 0.03374499356319355\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.6160337552742616,\n \"acc_stderr\": 0.031658678064106674,\n \"acc_norm\": 0.6160337552742616,\n \"acc_norm_stderr\": 0.031658678064106674\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.5426008968609866,\n \"acc_stderr\": 0.03343577705583065,\n \"acc_norm\": 0.5426008968609866,\n \"acc_norm_stderr\": 0.03343577705583065\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.48091603053435117,\n \"acc_stderr\": 0.04382094705550989,\n \"acc_norm\": 0.48091603053435117,\n \"acc_norm_stderr\": 0.04382094705550989\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.628099173553719,\n \"acc_stderr\": 0.04412015806624504,\n \"acc_norm\": 0.628099173553719,\n \"acc_norm_stderr\": 0.04412015806624504\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.5462962962962963,\n \"acc_stderr\": 0.04812917324536823,\n \"acc_norm\": 0.5462962962962963,\n \"acc_norm_stderr\": 0.04812917324536823\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.49079754601226994,\n \"acc_stderr\": 0.03927705600787443,\n \"acc_norm\": 0.49079754601226994,\n \"acc_norm_stderr\": 0.03927705600787443\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.3482142857142857,\n \"acc_stderr\": 0.04521829902833586,\n \"acc_norm\": 0.3482142857142857,\n \"acc_norm_stderr\": 0.04521829902833586\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.6407766990291263,\n \"acc_stderr\": 0.047504583990416946,\n \"acc_norm\": 0.6407766990291263,\n \"acc_norm_stderr\": 0.047504583990416946\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.6965811965811965,\n \"acc_stderr\": 0.030118210106942638,\n \"acc_norm\": 0.6965811965811965,\n \"acc_norm_stderr\": 0.030118210106942638\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.48,\n \"acc_stderr\": 0.050211673156867795,\n \"acc_norm\": 0.48,\n \"acc_norm_stderr\": 0.050211673156867795\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.6360153256704981,\n \"acc_stderr\": 0.017205684809032232,\n \"acc_norm\": 0.6360153256704981,\n \"acc_norm_stderr\": 0.017205684809032232\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.4797687861271676,\n \"acc_stderr\": 0.026897049996382868,\n \"acc_norm\": 0.4797687861271676,\n \"acc_norm_stderr\": 0.026897049996382868\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.2446927374301676,\n \"acc_stderr\": 0.014378169884098405,\n \"acc_norm\": 0.2446927374301676,\n \"acc_norm_stderr\": 0.014378169884098405\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.4738562091503268,\n \"acc_stderr\": 0.028590752958852394,\n \"acc_norm\": 0.4738562091503268,\n \"acc_norm_stderr\": 0.028590752958852394\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.5466237942122186,\n \"acc_stderr\": 0.02827435985489425,\n \"acc_norm\": 0.5466237942122186,\n \"acc_norm_stderr\": 0.02827435985489425\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.5308641975308642,\n \"acc_stderr\": 0.02776768960683392,\n \"acc_norm\": 0.5308641975308642,\n \"acc_norm_stderr\": 0.02776768960683392\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.35106382978723405,\n \"acc_stderr\": 0.02847350127296376,\n \"acc_norm\": 0.35106382978723405,\n \"acc_norm_stderr\": 0.02847350127296376\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.34159061277705344,\n \"acc_stderr\": 0.012112391320842854,\n \"acc_norm\": 0.34159061277705344,\n \"acc_norm_stderr\": 0.012112391320842854\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.3713235294117647,\n \"acc_stderr\": 0.02934980313976587,\n \"acc_norm\": 0.3713235294117647,\n \"acc_norm_stderr\": 0.02934980313976587\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.43790849673202614,\n \"acc_stderr\": 0.020071257886886525,\n \"acc_norm\": 0.43790849673202614,\n \"acc_norm_stderr\": 0.020071257886886525\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.4727272727272727,\n \"acc_stderr\": 0.04782001791380063,\n \"acc_norm\": 0.4727272727272727,\n \"acc_norm_stderr\": 0.04782001791380063\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.46530612244897956,\n \"acc_stderr\": 0.031932070244253145,\n \"acc_norm\": 0.46530612244897956,\n \"acc_norm_stderr\": 0.031932070244253145\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.6417910447761194,\n \"acc_stderr\": 0.03390393042268814,\n \"acc_norm\": 0.6417910447761194,\n \"acc_norm_stderr\": 0.03390393042268814\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.69,\n \"acc_stderr\": 0.04648231987117316,\n \"acc_norm\": 0.69,\n \"acc_norm_stderr\": 0.04648231987117316\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.4457831325301205,\n \"acc_stderr\": 0.03869543323472101,\n \"acc_norm\": 0.4457831325301205,\n \"acc_norm_stderr\": 0.03869543323472101\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.6900584795321637,\n \"acc_stderr\": 0.035469769593931624,\n \"acc_norm\": 0.6900584795321637,\n \"acc_norm_stderr\": 0.035469769593931624\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.2913096695226438,\n \"mc1_stderr\": 0.015905987048184828,\n \"mc2\": 0.4482303614832581,\n \"mc2_stderr\": 0.01566475317876804\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.6961325966850829,\n \"acc_stderr\": 0.01292620947548357\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.17058377558756635,\n \"acc_stderr\": 0.010360898504733311\n }\n}\n```", "repo_url": "https://huggingface.co/wang7776/Llama-2-7b-chat-hf-30-sparsity", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|arc:challenge|25_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|gsm8k|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hellaswag|10_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T03-37-48.728667.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["**/details_harness|winogrande|5_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-12T03-37-48.728667.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_12T03_37_48.728667", "path": ["results_2023-12-12T03-37-48.728667.parquet"]}, {"split": "latest", "path": ["results_2023-12-12T03-37-48.728667.parquet"]}]}]}
2023-12-12T03:41:40+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of wang7776/Llama-2-7b-chat-hf-30-sparsity Dataset automatically created during the evaluation run of model wang7776/Llama-2-7b-chat-hf-30-sparsity on the Open LLM Leaderboard. The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-12T03:37:48.728667(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Evaluation run of wang7776/Llama-2-7b-chat-hf-30-sparsity\n\n\n\nDataset automatically created during the evaluation run of model wang7776/Llama-2-7b-chat-hf-30-sparsity on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T03:37:48.728667(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of wang7776/Llama-2-7b-chat-hf-30-sparsity\n\n\n\nDataset automatically created during the evaluation run of model wang7776/Llama-2-7b-chat-hf-30-sparsity on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T03:37:48.728667(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 199, 67, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of wang7776/Llama-2-7b-chat-hf-30-sparsity\n\n\n\nDataset automatically created during the evaluation run of model wang7776/Llama-2-7b-chat-hf-30-sparsity on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-12T03:37:48.728667(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]" ]
19dcd7027a51f9db6893c1b630a89fcf199937b3
a quick and light dataset designed to PEFT fine-tune mistral 7B and improve upon its reasoning skills a fine-tuned and quantized model using this dataset can be found at netcat420/MHENN (successor coming soon)
netcat420/quiklogik
[ "license:mit", "region:us" ]
2023-12-12T03:50:41+00:00
{"license": "mit"}
2023-12-20T03:19:37+00:00
[]
[]
TAGS #license-mit #region-us
a quick and light dataset designed to PEFT fine-tune mistral 7B and improve upon its reasoning skills a fine-tuned and quantized model using this dataset can be found at netcat420/MHENN (successor coming soon)
[]
[ "TAGS\n#license-mit #region-us \n" ]
[ 11 ]
[ "passage: TAGS\n#license-mit #region-us \n" ]
3e341ff32157c861e8e0e88142aebb6288f7a477
# Dataset Card for Evaluation run of rwitz2/pee <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [rwitz2/pee](https://huggingface.co/rwitz2/pee) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_rwitz2__pee", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-12T03:51:33.544691](https://huggingface.co/datasets/open-llm-leaderboard/details_rwitz2__pee/blob/main/results_2023-12-12T03-51-33.544691.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.6542170997170107, "acc_stderr": 0.03207676007306859, "acc_norm": 0.6540988805189213, "acc_norm_stderr": 0.03274105444201814, "mc1": 0.4528763769889841, "mc1_stderr": 0.01742558984831402, "mc2": 0.6056136500975297, "mc2_stderr": 0.015324168216531053 }, "harness|arc:challenge|25": { "acc": 0.6689419795221843, "acc_stderr": 0.01375206241981784, "acc_norm": 0.6988054607508533, "acc_norm_stderr": 0.01340674176784764 }, "harness|hellaswag|10": { "acc": 0.6868153754232225, "acc_stderr": 0.004628409084218762, "acc_norm": 0.8688508265285799, "acc_norm_stderr": 0.003368735434161383 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.32, "acc_stderr": 0.04688261722621504, "acc_norm": 0.32, "acc_norm_stderr": 0.04688261722621504 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.6444444444444445, "acc_stderr": 0.04135176749720385, "acc_norm": 0.6444444444444445, "acc_norm_stderr": 0.04135176749720385 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.6907894736842105, "acc_stderr": 0.037610708698674805, "acc_norm": 0.6907894736842105, "acc_norm_stderr": 0.037610708698674805 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.65, "acc_stderr": 0.0479372485441102, "acc_norm": 0.65, "acc_norm_stderr": 0.0479372485441102 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.7283018867924528, "acc_stderr": 0.027377706624670713, "acc_norm": 0.7283018867924528, "acc_norm_stderr": 0.027377706624670713 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.7638888888888888, "acc_stderr": 0.03551446610810826, "acc_norm": 0.7638888888888888, "acc_norm_stderr": 0.03551446610810826 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.48, "acc_stderr": 0.050211673156867795, "acc_norm": 0.48, "acc_norm_stderr": 0.050211673156867795 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.56, "acc_stderr": 0.04988876515698589, "acc_norm": 0.56, "acc_norm_stderr": 0.04988876515698589 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.33, "acc_stderr": 0.047258156262526045, "acc_norm": 0.33, "acc_norm_stderr": 0.047258156262526045 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.6589595375722543, "acc_stderr": 0.036146654241808254, "acc_norm": 0.6589595375722543, "acc_norm_stderr": 0.036146654241808254 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.43137254901960786, "acc_stderr": 0.04928099597287534, "acc_norm": 0.43137254901960786, "acc_norm_stderr": 0.04928099597287534 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.73, "acc_stderr": 0.044619604333847394, "acc_norm": 0.73, "acc_norm_stderr": 0.044619604333847394 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.5957446808510638, "acc_stderr": 0.03208115750788684, "acc_norm": 0.5957446808510638, "acc_norm_stderr": 0.03208115750788684 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.5, "acc_stderr": 0.047036043419179864, "acc_norm": 0.5, "acc_norm_stderr": 0.047036043419179864 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.5310344827586206, "acc_stderr": 0.04158632762097828, "acc_norm": 0.5310344827586206, "acc_norm_stderr": 0.04158632762097828 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.42857142857142855, "acc_stderr": 0.025487187147859375, "acc_norm": 0.42857142857142855, "acc_norm_stderr": 0.025487187147859375 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.5079365079365079, "acc_stderr": 0.044715725362943486, "acc_norm": 0.5079365079365079, "acc_norm_stderr": 0.044715725362943486 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.33, "acc_stderr": 0.047258156262526045, "acc_norm": 0.33, "acc_norm_stderr": 0.047258156262526045 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.7774193548387097, "acc_stderr": 0.023664216671642518, "acc_norm": 0.7774193548387097, "acc_norm_stderr": 0.023664216671642518 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.4876847290640394, "acc_stderr": 0.035169204442208966, "acc_norm": 0.4876847290640394, "acc_norm_stderr": 0.035169204442208966 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.7, "acc_stderr": 0.046056618647183814, "acc_norm": 0.7, "acc_norm_stderr": 0.046056618647183814 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.793939393939394, "acc_stderr": 0.0315841532404771, "acc_norm": 0.793939393939394, "acc_norm_stderr": 0.0315841532404771 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.7929292929292929, "acc_stderr": 0.02886977846026705, "acc_norm": 0.7929292929292929, "acc_norm_stderr": 0.02886977846026705 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.9067357512953368, "acc_stderr": 0.02098685459328973, "acc_norm": 0.9067357512953368, "acc_norm_stderr": 0.02098685459328973 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.6820512820512821, "acc_stderr": 0.023610884308927865, "acc_norm": 0.6820512820512821, "acc_norm_stderr": 0.023610884308927865 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.34074074074074073, "acc_stderr": 0.028897748741131147, "acc_norm": 0.34074074074074073, "acc_norm_stderr": 0.028897748741131147 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.6848739495798319, "acc_stderr": 0.030176808288974337, "acc_norm": 0.6848739495798319, "acc_norm_stderr": 0.030176808288974337 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.37748344370860926, "acc_stderr": 0.03958027231121569, "acc_norm": 0.37748344370860926, "acc_norm_stderr": 0.03958027231121569 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.8495412844036697, "acc_stderr": 0.015328563932669237, "acc_norm": 0.8495412844036697, "acc_norm_stderr": 0.015328563932669237 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.5277777777777778, "acc_stderr": 0.0340470532865388, "acc_norm": 0.5277777777777778, "acc_norm_stderr": 0.0340470532865388 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.8186274509803921, "acc_stderr": 0.027044621719474082, "acc_norm": 0.8186274509803921, "acc_norm_stderr": 0.027044621719474082 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.8143459915611815, "acc_stderr": 0.025310495376944863, "acc_norm": 0.8143459915611815, "acc_norm_stderr": 0.025310495376944863 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.6905829596412556, "acc_stderr": 0.03102441174057221, "acc_norm": 0.6905829596412556, "acc_norm_stderr": 0.03102441174057221 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.8015267175572519, "acc_stderr": 0.03498149385462472, "acc_norm": 0.8015267175572519, "acc_norm_stderr": 0.03498149385462472 }, "harness|hendrycksTest-international_law|5": { "acc": 0.7851239669421488, "acc_stderr": 0.037494924487096966, "acc_norm": 0.7851239669421488, "acc_norm_stderr": 0.037494924487096966 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.7777777777777778, "acc_stderr": 0.0401910747255735, "acc_norm": 0.7777777777777778, "acc_norm_stderr": 0.0401910747255735 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.7607361963190185, "acc_stderr": 0.0335195387952127, "acc_norm": 0.7607361963190185, "acc_norm_stderr": 0.0335195387952127 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.4642857142857143, "acc_stderr": 0.04733667890053756, "acc_norm": 0.4642857142857143, "acc_norm_stderr": 0.04733667890053756 }, "harness|hendrycksTest-management|5": { "acc": 0.7572815533980582, "acc_stderr": 0.04245022486384495, "acc_norm": 0.7572815533980582, "acc_norm_stderr": 0.04245022486384495 }, "harness|hendrycksTest-marketing|5": { "acc": 0.8846153846153846, "acc_stderr": 0.020930193185179323, "acc_norm": 0.8846153846153846, "acc_norm_stderr": 0.020930193185179323 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.72, "acc_stderr": 0.045126085985421276, "acc_norm": 0.72, "acc_norm_stderr": 0.045126085985421276 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.8263090676883781, "acc_stderr": 0.013547415658662257, "acc_norm": 0.8263090676883781, "acc_norm_stderr": 0.013547415658662257 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.7398843930635838, "acc_stderr": 0.023618678310069367, "acc_norm": 0.7398843930635838, "acc_norm_stderr": 0.023618678310069367 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.4044692737430168, "acc_stderr": 0.01641444091729315, "acc_norm": 0.4044692737430168, "acc_norm_stderr": 0.01641444091729315 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.7352941176470589, "acc_stderr": 0.025261691219729484, "acc_norm": 0.7352941176470589, "acc_norm_stderr": 0.025261691219729484 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.7106109324758842, "acc_stderr": 0.025755865922632945, "acc_norm": 0.7106109324758842, "acc_norm_stderr": 0.025755865922632945 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.7345679012345679, "acc_stderr": 0.024569223600460845, "acc_norm": 0.7345679012345679, "acc_norm_stderr": 0.024569223600460845 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.48226950354609927, "acc_stderr": 0.02980873964223777, "acc_norm": 0.48226950354609927, "acc_norm_stderr": 0.02980873964223777 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.4634941329856584, "acc_stderr": 0.012736153390214961, "acc_norm": 0.4634941329856584, "acc_norm_stderr": 0.012736153390214961 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.6764705882352942, "acc_stderr": 0.02841820861940676, "acc_norm": 0.6764705882352942, "acc_norm_stderr": 0.02841820861940676 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.6781045751633987, "acc_stderr": 0.018901015322093092, "acc_norm": 0.6781045751633987, "acc_norm_stderr": 0.018901015322093092 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.6818181818181818, "acc_stderr": 0.04461272175910509, "acc_norm": 0.6818181818181818, "acc_norm_stderr": 0.04461272175910509 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.7306122448979592, "acc_stderr": 0.02840125202902294, "acc_norm": 0.7306122448979592, "acc_norm_stderr": 0.02840125202902294 }, "harness|hendrycksTest-sociology|5": { "acc": 0.8507462686567164, "acc_stderr": 0.025196929874827072, "acc_norm": 0.8507462686567164, "acc_norm_stderr": 0.025196929874827072 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.86, "acc_stderr": 0.0348735088019777, "acc_norm": 0.86, "acc_norm_stderr": 0.0348735088019777 }, "harness|hendrycksTest-virology|5": { "acc": 0.536144578313253, "acc_stderr": 0.038823108508905954, "acc_norm": 0.536144578313253, "acc_norm_stderr": 0.038823108508905954 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.8362573099415205, "acc_stderr": 0.028380919596145866, "acc_norm": 0.8362573099415205, "acc_norm_stderr": 0.028380919596145866 }, "harness|truthfulqa:mc|0": { "mc1": 0.4528763769889841, "mc1_stderr": 0.01742558984831402, "mc2": 0.6056136500975297, "mc2_stderr": 0.015324168216531053 }, "harness|winogrande|5": { "acc": 0.8176795580110497, "acc_stderr": 0.010851565594267198 }, "harness|gsm8k|5": { "acc": 0.709628506444276, "acc_stderr": 0.012503592481818952 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
open-llm-leaderboard/details_rwitz2__pee
[ "region:us" ]
2023-12-12T03:54:29+00:00
{"pretty_name": "Evaluation run of rwitz2/pee", "dataset_summary": "Dataset automatically created during the evaluation run of model [rwitz2/pee](https://huggingface.co/rwitz2/pee) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_rwitz2__pee\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-12T03:51:33.544691](https://huggingface.co/datasets/open-llm-leaderboard/details_rwitz2__pee/blob/main/results_2023-12-12T03-51-33.544691.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.6542170997170107,\n \"acc_stderr\": 0.03207676007306859,\n \"acc_norm\": 0.6540988805189213,\n \"acc_norm_stderr\": 0.03274105444201814,\n \"mc1\": 0.4528763769889841,\n \"mc1_stderr\": 0.01742558984831402,\n \"mc2\": 0.6056136500975297,\n \"mc2_stderr\": 0.015324168216531053\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.6689419795221843,\n \"acc_stderr\": 0.01375206241981784,\n \"acc_norm\": 0.6988054607508533,\n \"acc_norm_stderr\": 0.01340674176784764\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6868153754232225,\n \"acc_stderr\": 0.004628409084218762,\n \"acc_norm\": 0.8688508265285799,\n \"acc_norm_stderr\": 0.003368735434161383\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.32,\n \"acc_stderr\": 0.04688261722621504,\n \"acc_norm\": 0.32,\n \"acc_norm_stderr\": 0.04688261722621504\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.6444444444444445,\n \"acc_stderr\": 0.04135176749720385,\n \"acc_norm\": 0.6444444444444445,\n \"acc_norm_stderr\": 0.04135176749720385\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.6907894736842105,\n \"acc_stderr\": 0.037610708698674805,\n \"acc_norm\": 0.6907894736842105,\n \"acc_norm_stderr\": 0.037610708698674805\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.65,\n \"acc_stderr\": 0.0479372485441102,\n \"acc_norm\": 0.65,\n \"acc_norm_stderr\": 0.0479372485441102\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.7283018867924528,\n \"acc_stderr\": 0.027377706624670713,\n \"acc_norm\": 0.7283018867924528,\n \"acc_norm_stderr\": 0.027377706624670713\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.7638888888888888,\n \"acc_stderr\": 0.03551446610810826,\n \"acc_norm\": 0.7638888888888888,\n \"acc_norm_stderr\": 0.03551446610810826\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.48,\n \"acc_stderr\": 0.050211673156867795,\n \"acc_norm\": 0.48,\n \"acc_norm_stderr\": 0.050211673156867795\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.56,\n \"acc_stderr\": 0.04988876515698589,\n \"acc_norm\": 0.56,\n \"acc_norm_stderr\": 0.04988876515698589\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.33,\n \"acc_stderr\": 0.047258156262526045,\n \"acc_norm\": 0.33,\n \"acc_norm_stderr\": 0.047258156262526045\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.6589595375722543,\n \"acc_stderr\": 0.036146654241808254,\n \"acc_norm\": 0.6589595375722543,\n \"acc_norm_stderr\": 0.036146654241808254\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.43137254901960786,\n \"acc_stderr\": 0.04928099597287534,\n \"acc_norm\": 0.43137254901960786,\n \"acc_norm_stderr\": 0.04928099597287534\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.73,\n \"acc_stderr\": 0.044619604333847394,\n \"acc_norm\": 0.73,\n \"acc_norm_stderr\": 0.044619604333847394\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.5957446808510638,\n \"acc_stderr\": 0.03208115750788684,\n \"acc_norm\": 0.5957446808510638,\n \"acc_norm_stderr\": 0.03208115750788684\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.5,\n \"acc_stderr\": 0.047036043419179864,\n \"acc_norm\": 0.5,\n \"acc_norm_stderr\": 0.047036043419179864\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.5310344827586206,\n \"acc_stderr\": 0.04158632762097828,\n \"acc_norm\": 0.5310344827586206,\n \"acc_norm_stderr\": 0.04158632762097828\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.42857142857142855,\n \"acc_stderr\": 0.025487187147859375,\n \"acc_norm\": 0.42857142857142855,\n \"acc_norm_stderr\": 0.025487187147859375\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.5079365079365079,\n \"acc_stderr\": 0.044715725362943486,\n \"acc_norm\": 0.5079365079365079,\n \"acc_norm_stderr\": 0.044715725362943486\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.33,\n \"acc_stderr\": 0.047258156262526045,\n \"acc_norm\": 0.33,\n \"acc_norm_stderr\": 0.047258156262526045\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.7774193548387097,\n \"acc_stderr\": 0.023664216671642518,\n \"acc_norm\": 0.7774193548387097,\n \"acc_norm_stderr\": 0.023664216671642518\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.4876847290640394,\n \"acc_stderr\": 0.035169204442208966,\n \"acc_norm\": 0.4876847290640394,\n \"acc_norm_stderr\": 0.035169204442208966\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.7,\n \"acc_stderr\": 0.046056618647183814,\n \"acc_norm\": 0.7,\n \"acc_norm_stderr\": 0.046056618647183814\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.793939393939394,\n \"acc_stderr\": 0.0315841532404771,\n \"acc_norm\": 0.793939393939394,\n \"acc_norm_stderr\": 0.0315841532404771\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.7929292929292929,\n \"acc_stderr\": 0.02886977846026705,\n \"acc_norm\": 0.7929292929292929,\n \"acc_norm_stderr\": 0.02886977846026705\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.9067357512953368,\n \"acc_stderr\": 0.02098685459328973,\n \"acc_norm\": 0.9067357512953368,\n \"acc_norm_stderr\": 0.02098685459328973\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.6820512820512821,\n \"acc_stderr\": 0.023610884308927865,\n \"acc_norm\": 0.6820512820512821,\n \"acc_norm_stderr\": 0.023610884308927865\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.34074074074074073,\n \"acc_stderr\": 0.028897748741131147,\n \"acc_norm\": 0.34074074074074073,\n \"acc_norm_stderr\": 0.028897748741131147\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.6848739495798319,\n \"acc_stderr\": 0.030176808288974337,\n \"acc_norm\": 0.6848739495798319,\n \"acc_norm_stderr\": 0.030176808288974337\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.37748344370860926,\n \"acc_stderr\": 0.03958027231121569,\n \"acc_norm\": 0.37748344370860926,\n \"acc_norm_stderr\": 0.03958027231121569\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.8495412844036697,\n \"acc_stderr\": 0.015328563932669237,\n \"acc_norm\": 0.8495412844036697,\n \"acc_norm_stderr\": 0.015328563932669237\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.5277777777777778,\n \"acc_stderr\": 0.0340470532865388,\n \"acc_norm\": 0.5277777777777778,\n \"acc_norm_stderr\": 0.0340470532865388\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.8186274509803921,\n \"acc_stderr\": 0.027044621719474082,\n \"acc_norm\": 0.8186274509803921,\n \"acc_norm_stderr\": 0.027044621719474082\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.8143459915611815,\n \"acc_stderr\": 0.025310495376944863,\n \"acc_norm\": 0.8143459915611815,\n \"acc_norm_stderr\": 0.025310495376944863\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.6905829596412556,\n \"acc_stderr\": 0.03102441174057221,\n \"acc_norm\": 0.6905829596412556,\n \"acc_norm_stderr\": 0.03102441174057221\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.8015267175572519,\n \"acc_stderr\": 0.03498149385462472,\n \"acc_norm\": 0.8015267175572519,\n \"acc_norm_stderr\": 0.03498149385462472\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.7851239669421488,\n \"acc_stderr\": 0.037494924487096966,\n \"acc_norm\": 0.7851239669421488,\n \"acc_norm_stderr\": 0.037494924487096966\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.7777777777777778,\n \"acc_stderr\": 0.0401910747255735,\n \"acc_norm\": 0.7777777777777778,\n \"acc_norm_stderr\": 0.0401910747255735\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.7607361963190185,\n \"acc_stderr\": 0.0335195387952127,\n \"acc_norm\": 0.7607361963190185,\n \"acc_norm_stderr\": 0.0335195387952127\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.4642857142857143,\n \"acc_stderr\": 0.04733667890053756,\n \"acc_norm\": 0.4642857142857143,\n \"acc_norm_stderr\": 0.04733667890053756\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.7572815533980582,\n \"acc_stderr\": 0.04245022486384495,\n \"acc_norm\": 0.7572815533980582,\n \"acc_norm_stderr\": 0.04245022486384495\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.8846153846153846,\n \"acc_stderr\": 0.020930193185179323,\n \"acc_norm\": 0.8846153846153846,\n \"acc_norm_stderr\": 0.020930193185179323\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.72,\n \"acc_stderr\": 0.045126085985421276,\n \"acc_norm\": 0.72,\n \"acc_norm_stderr\": 0.045126085985421276\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.8263090676883781,\n \"acc_stderr\": 0.013547415658662257,\n \"acc_norm\": 0.8263090676883781,\n \"acc_norm_stderr\": 0.013547415658662257\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.7398843930635838,\n \"acc_stderr\": 0.023618678310069367,\n \"acc_norm\": 0.7398843930635838,\n \"acc_norm_stderr\": 0.023618678310069367\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.4044692737430168,\n \"acc_stderr\": 0.01641444091729315,\n \"acc_norm\": 0.4044692737430168,\n \"acc_norm_stderr\": 0.01641444091729315\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.7352941176470589,\n \"acc_stderr\": 0.025261691219729484,\n \"acc_norm\": 0.7352941176470589,\n \"acc_norm_stderr\": 0.025261691219729484\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.7106109324758842,\n \"acc_stderr\": 0.025755865922632945,\n \"acc_norm\": 0.7106109324758842,\n \"acc_norm_stderr\": 0.025755865922632945\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.7345679012345679,\n \"acc_stderr\": 0.024569223600460845,\n \"acc_norm\": 0.7345679012345679,\n \"acc_norm_stderr\": 0.024569223600460845\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.48226950354609927,\n \"acc_stderr\": 0.02980873964223777,\n \"acc_norm\": 0.48226950354609927,\n \"acc_norm_stderr\": 0.02980873964223777\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.4634941329856584,\n \"acc_stderr\": 0.012736153390214961,\n \"acc_norm\": 0.4634941329856584,\n \"acc_norm_stderr\": 0.012736153390214961\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.6764705882352942,\n \"acc_stderr\": 0.02841820861940676,\n \"acc_norm\": 0.6764705882352942,\n \"acc_norm_stderr\": 0.02841820861940676\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.6781045751633987,\n \"acc_stderr\": 0.018901015322093092,\n \"acc_norm\": 0.6781045751633987,\n \"acc_norm_stderr\": 0.018901015322093092\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.6818181818181818,\n \"acc_stderr\": 0.04461272175910509,\n \"acc_norm\": 0.6818181818181818,\n \"acc_norm_stderr\": 0.04461272175910509\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.7306122448979592,\n \"acc_stderr\": 0.02840125202902294,\n \"acc_norm\": 0.7306122448979592,\n \"acc_norm_stderr\": 0.02840125202902294\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.8507462686567164,\n \"acc_stderr\": 0.025196929874827072,\n \"acc_norm\": 0.8507462686567164,\n \"acc_norm_stderr\": 0.025196929874827072\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.86,\n \"acc_stderr\": 0.0348735088019777,\n \"acc_norm\": 0.86,\n \"acc_norm_stderr\": 0.0348735088019777\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.536144578313253,\n \"acc_stderr\": 0.038823108508905954,\n \"acc_norm\": 0.536144578313253,\n \"acc_norm_stderr\": 0.038823108508905954\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.8362573099415205,\n \"acc_stderr\": 0.028380919596145866,\n \"acc_norm\": 0.8362573099415205,\n \"acc_norm_stderr\": 0.028380919596145866\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.4528763769889841,\n \"mc1_stderr\": 0.01742558984831402,\n \"mc2\": 0.6056136500975297,\n \"mc2_stderr\": 0.015324168216531053\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.8176795580110497,\n \"acc_stderr\": 0.010851565594267198\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.709628506444276,\n \"acc_stderr\": 0.012503592481818952\n }\n}\n```", "repo_url": "https://huggingface.co/rwitz2/pee", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|arc:challenge|25_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|gsm8k|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hellaswag|10_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T03-51-33.544691.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["**/details_harness|winogrande|5_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-12T03-51-33.544691.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_12T03_51_33.544691", "path": ["results_2023-12-12T03-51-33.544691.parquet"]}, {"split": "latest", "path": ["results_2023-12-12T03-51-33.544691.parquet"]}]}]}
2023-12-12T03:55:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of rwitz2/pee Dataset automatically created during the evaluation run of model rwitz2/pee on the Open LLM Leaderboard. The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-12T03:51:33.544691(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Evaluation run of rwitz2/pee\n\n\n\nDataset automatically created during the evaluation run of model rwitz2/pee on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T03:51:33.544691(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of rwitz2/pee\n\n\n\nDataset automatically created during the evaluation run of model rwitz2/pee on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T03:51:33.544691(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 171, 67, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of rwitz2/pee\n\n\n\nDataset automatically created during the evaluation run of model rwitz2/pee on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-12T03:51:33.544691(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
c7ec420b67271a93622bd3ba32ac6365951982e7
# Dataset Card for Evaluation run of janhq/supermario-v1 <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [janhq/supermario-v1](https://huggingface.co/janhq/supermario-v1) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_janhq__supermario-v1", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-12T03:51:54.393256](https://huggingface.co/datasets/open-llm-leaderboard/details_janhq__supermario-v1/blob/main/results_2023-12-12T03-51-54.393256.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.2686365809879161, "acc_stderr": 0.030966991471902273, "acc_norm": 0.2702927883435164, "acc_norm_stderr": 0.031792363135365875, "mc1": 0.23623011015911874, "mc1_stderr": 0.014869755015871093, "mc2": 0.472672099464425, "mc2_stderr": 0.016538310174069905 }, "harness|arc:challenge|25": { "acc": 0.2295221843003413, "acc_stderr": 0.012288926760890773, "acc_norm": 0.2773037542662116, "acc_norm_stderr": 0.013082095839059374 }, "harness|hellaswag|10": { "acc": 0.25473013343955386, "acc_stderr": 0.004348189459336535, "acc_norm": 0.2583150766779526, "acc_norm_stderr": 0.004368135676213557 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.21, "acc_stderr": 0.040936018074033256, "acc_norm": 0.21, "acc_norm_stderr": 0.040936018074033256 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.22962962962962963, "acc_stderr": 0.03633384414073461, "acc_norm": 0.22962962962962963, "acc_norm_stderr": 0.03633384414073461 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.3355263157894737, "acc_stderr": 0.03842498559395268, "acc_norm": 0.3355263157894737, "acc_norm_stderr": 0.03842498559395268 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.21, "acc_stderr": 0.040936018074033256, "acc_norm": 0.21, "acc_norm_stderr": 0.040936018074033256 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.2981132075471698, "acc_stderr": 0.028152837942493857, "acc_norm": 0.2981132075471698, "acc_norm_stderr": 0.028152837942493857 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.2638888888888889, "acc_stderr": 0.03685651095897532, "acc_norm": 0.2638888888888889, "acc_norm_stderr": 0.03685651095897532 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.41, "acc_stderr": 0.049431107042371025, "acc_norm": 0.41, "acc_norm_stderr": 0.049431107042371025 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.33, "acc_stderr": 0.04725815626252604, "acc_norm": 0.33, "acc_norm_stderr": 0.04725815626252604 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.31, "acc_stderr": 0.04648231987117316, "acc_norm": 0.31, "acc_norm_stderr": 0.04648231987117316 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.3352601156069364, "acc_stderr": 0.03599586301247078, "acc_norm": 0.3352601156069364, "acc_norm_stderr": 0.03599586301247078 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.37254901960784315, "acc_stderr": 0.04810840148082633, "acc_norm": 0.37254901960784315, "acc_norm_stderr": 0.04810840148082633 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.18, "acc_stderr": 0.038612291966536955, "acc_norm": 0.18, "acc_norm_stderr": 0.038612291966536955 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.20851063829787234, "acc_stderr": 0.026556982117838728, "acc_norm": 0.20851063829787234, "acc_norm_stderr": 0.026556982117838728 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.23684210526315788, "acc_stderr": 0.039994238792813344, "acc_norm": 0.23684210526315788, "acc_norm_stderr": 0.039994238792813344 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.2413793103448276, "acc_stderr": 0.03565998174135302, "acc_norm": 0.2413793103448276, "acc_norm_stderr": 0.03565998174135302 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.2671957671957672, "acc_stderr": 0.022789673145776564, "acc_norm": 0.2671957671957672, "acc_norm_stderr": 0.022789673145776564 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.36507936507936506, "acc_stderr": 0.04306241259127153, "acc_norm": 0.36507936507936506, "acc_norm_stderr": 0.04306241259127153 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.18, "acc_stderr": 0.03861229196653694, "acc_norm": 0.18, "acc_norm_stderr": 0.03861229196653694 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.3161290322580645, "acc_stderr": 0.02645087448904277, "acc_norm": 0.3161290322580645, "acc_norm_stderr": 0.02645087448904277 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.28078817733990147, "acc_stderr": 0.03161856335358609, "acc_norm": 0.28078817733990147, "acc_norm_stderr": 0.03161856335358609 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.19, "acc_stderr": 0.039427724440366234, "acc_norm": 0.19, "acc_norm_stderr": 0.039427724440366234 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.2545454545454545, "acc_stderr": 0.03401506715249039, "acc_norm": 0.2545454545454545, "acc_norm_stderr": 0.03401506715249039 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.35353535353535354, "acc_stderr": 0.03406086723547153, "acc_norm": 0.35353535353535354, "acc_norm_stderr": 0.03406086723547153 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.36787564766839376, "acc_stderr": 0.03480175668466036, "acc_norm": 0.36787564766839376, "acc_norm_stderr": 0.03480175668466036 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.3641025641025641, "acc_stderr": 0.02439667298509477, "acc_norm": 0.3641025641025641, "acc_norm_stderr": 0.02439667298509477 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.26296296296296295, "acc_stderr": 0.026842057873833706, "acc_norm": 0.26296296296296295, "acc_norm_stderr": 0.026842057873833706 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.3487394957983193, "acc_stderr": 0.03095663632856655, "acc_norm": 0.3487394957983193, "acc_norm_stderr": 0.03095663632856655 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.33112582781456956, "acc_stderr": 0.038425817186598696, "acc_norm": 0.33112582781456956, "acc_norm_stderr": 0.038425817186598696 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.3486238532110092, "acc_stderr": 0.020431254090714328, "acc_norm": 0.3486238532110092, "acc_norm_stderr": 0.020431254090714328 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.4722222222222222, "acc_stderr": 0.0340470532865388, "acc_norm": 0.4722222222222222, "acc_norm_stderr": 0.0340470532865388 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.2549019607843137, "acc_stderr": 0.030587591351604246, "acc_norm": 0.2549019607843137, "acc_norm_stderr": 0.030587591351604246 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.20253164556962025, "acc_stderr": 0.026160568246601457, "acc_norm": 0.20253164556962025, "acc_norm_stderr": 0.026160568246601457 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.10762331838565023, "acc_stderr": 0.020799400082879997, "acc_norm": 0.10762331838565023, "acc_norm_stderr": 0.020799400082879997 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.2824427480916031, "acc_stderr": 0.03948406125768361, "acc_norm": 0.2824427480916031, "acc_norm_stderr": 0.03948406125768361 }, "harness|hendrycksTest-international_law|5": { "acc": 0.14049586776859505, "acc_stderr": 0.03172233426002161, "acc_norm": 0.14049586776859505, "acc_norm_stderr": 0.03172233426002161 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.21296296296296297, "acc_stderr": 0.0395783547198098, "acc_norm": 0.21296296296296297, "acc_norm_stderr": 0.0395783547198098 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.2331288343558282, "acc_stderr": 0.033220157957767414, "acc_norm": 0.2331288343558282, "acc_norm_stderr": 0.033220157957767414 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.16071428571428573, "acc_stderr": 0.03485946096475741, "acc_norm": 0.16071428571428573, "acc_norm_stderr": 0.03485946096475741 }, "harness|hendrycksTest-management|5": { "acc": 0.3786407766990291, "acc_stderr": 0.04802694698258972, "acc_norm": 0.3786407766990291, "acc_norm_stderr": 0.04802694698258972 }, "harness|hendrycksTest-marketing|5": { "acc": 0.19658119658119658, "acc_stderr": 0.02603538609895129, "acc_norm": 0.19658119658119658, "acc_norm_stderr": 0.02603538609895129 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.24, "acc_stderr": 0.04292346959909281, "acc_norm": 0.24, "acc_norm_stderr": 0.04292346959909281 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.20434227330779056, "acc_stderr": 0.0144191239809319, "acc_norm": 0.20434227330779056, "acc_norm_stderr": 0.0144191239809319 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.2138728323699422, "acc_stderr": 0.022075709251757183, "acc_norm": 0.2138728323699422, "acc_norm_stderr": 0.022075709251757183 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.27262569832402234, "acc_stderr": 0.014893391735249588, "acc_norm": 0.27262569832402234, "acc_norm_stderr": 0.014893391735249588 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.29411764705882354, "acc_stderr": 0.02609016250427905, "acc_norm": 0.29411764705882354, "acc_norm_stderr": 0.02609016250427905 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.24115755627009647, "acc_stderr": 0.024296594034763426, "acc_norm": 0.24115755627009647, "acc_norm_stderr": 0.024296594034763426 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.22530864197530864, "acc_stderr": 0.023246202647819746, "acc_norm": 0.22530864197530864, "acc_norm_stderr": 0.023246202647819746 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.24113475177304963, "acc_stderr": 0.025518731049537762, "acc_norm": 0.24113475177304963, "acc_norm_stderr": 0.025518731049537762 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.24445893089960888, "acc_stderr": 0.010976425013113886, "acc_norm": 0.24445893089960888, "acc_norm_stderr": 0.010976425013113886 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.4485294117647059, "acc_stderr": 0.030211479609121593, "acc_norm": 0.4485294117647059, "acc_norm_stderr": 0.030211479609121593 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.2173202614379085, "acc_stderr": 0.01668482092914859, "acc_norm": 0.2173202614379085, "acc_norm_stderr": 0.01668482092914859 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.22727272727272727, "acc_stderr": 0.04013964554072774, "acc_norm": 0.22727272727272727, "acc_norm_stderr": 0.04013964554072774 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.4, "acc_stderr": 0.031362502409358936, "acc_norm": 0.4, "acc_norm_stderr": 0.031362502409358936 }, "harness|hendrycksTest-sociology|5": { "acc": 0.26865671641791045, "acc_stderr": 0.03134328358208954, "acc_norm": 0.26865671641791045, "acc_norm_stderr": 0.03134328358208954 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.26, "acc_stderr": 0.04408440022768078, "acc_norm": 0.26, "acc_norm_stderr": 0.04408440022768078 }, "harness|hendrycksTest-virology|5": { "acc": 0.1927710843373494, "acc_stderr": 0.030709824050565274, "acc_norm": 0.1927710843373494, "acc_norm_stderr": 0.030709824050565274 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.17543859649122806, "acc_stderr": 0.029170885500727654, "acc_norm": 0.17543859649122806, "acc_norm_stderr": 0.029170885500727654 }, "harness|truthfulqa:mc|0": { "mc1": 0.23623011015911874, "mc1_stderr": 0.014869755015871093, "mc2": 0.472672099464425, "mc2_stderr": 0.016538310174069905 }, "harness|winogrande|5": { "acc": 0.4909234411996843, "acc_stderr": 0.014050170094497707 }, "harness|gsm8k|5": { "acc": 0.0, "acc_stderr": 0.0 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
open-llm-leaderboard/details_janhq__supermario-v1
[ "region:us" ]
2023-12-12T03:54:50+00:00
{"pretty_name": "Evaluation run of janhq/supermario-v1", "dataset_summary": "Dataset automatically created during the evaluation run of model [janhq/supermario-v1](https://huggingface.co/janhq/supermario-v1) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_janhq__supermario-v1\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-12T03:51:54.393256](https://huggingface.co/datasets/open-llm-leaderboard/details_janhq__supermario-v1/blob/main/results_2023-12-12T03-51-54.393256.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.2686365809879161,\n \"acc_stderr\": 0.030966991471902273,\n \"acc_norm\": 0.2702927883435164,\n \"acc_norm_stderr\": 0.031792363135365875,\n \"mc1\": 0.23623011015911874,\n \"mc1_stderr\": 0.014869755015871093,\n \"mc2\": 0.472672099464425,\n \"mc2_stderr\": 0.016538310174069905\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.2295221843003413,\n \"acc_stderr\": 0.012288926760890773,\n \"acc_norm\": 0.2773037542662116,\n \"acc_norm_stderr\": 0.013082095839059374\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.25473013343955386,\n \"acc_stderr\": 0.004348189459336535,\n \"acc_norm\": 0.2583150766779526,\n \"acc_norm_stderr\": 0.004368135676213557\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.21,\n \"acc_stderr\": 0.040936018074033256,\n \"acc_norm\": 0.21,\n \"acc_norm_stderr\": 0.040936018074033256\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.22962962962962963,\n \"acc_stderr\": 0.03633384414073461,\n \"acc_norm\": 0.22962962962962963,\n \"acc_norm_stderr\": 0.03633384414073461\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.3355263157894737,\n \"acc_stderr\": 0.03842498559395268,\n \"acc_norm\": 0.3355263157894737,\n \"acc_norm_stderr\": 0.03842498559395268\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.21,\n \"acc_stderr\": 0.040936018074033256,\n \"acc_norm\": 0.21,\n \"acc_norm_stderr\": 0.040936018074033256\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.2981132075471698,\n \"acc_stderr\": 0.028152837942493857,\n \"acc_norm\": 0.2981132075471698,\n \"acc_norm_stderr\": 0.028152837942493857\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.2638888888888889,\n \"acc_stderr\": 0.03685651095897532,\n \"acc_norm\": 0.2638888888888889,\n \"acc_norm_stderr\": 0.03685651095897532\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.41,\n \"acc_stderr\": 0.049431107042371025,\n \"acc_norm\": 0.41,\n \"acc_norm_stderr\": 0.049431107042371025\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.33,\n \"acc_stderr\": 0.04725815626252604,\n \"acc_norm\": 0.33,\n \"acc_norm_stderr\": 0.04725815626252604\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.31,\n \"acc_stderr\": 0.04648231987117316,\n \"acc_norm\": 0.31,\n \"acc_norm_stderr\": 0.04648231987117316\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.3352601156069364,\n \"acc_stderr\": 0.03599586301247078,\n \"acc_norm\": 0.3352601156069364,\n \"acc_norm_stderr\": 0.03599586301247078\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.37254901960784315,\n \"acc_stderr\": 0.04810840148082633,\n \"acc_norm\": 0.37254901960784315,\n \"acc_norm_stderr\": 0.04810840148082633\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.18,\n \"acc_stderr\": 0.038612291966536955,\n \"acc_norm\": 0.18,\n \"acc_norm_stderr\": 0.038612291966536955\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.20851063829787234,\n \"acc_stderr\": 0.026556982117838728,\n \"acc_norm\": 0.20851063829787234,\n \"acc_norm_stderr\": 0.026556982117838728\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.23684210526315788,\n \"acc_stderr\": 0.039994238792813344,\n \"acc_norm\": 0.23684210526315788,\n \"acc_norm_stderr\": 0.039994238792813344\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.2413793103448276,\n \"acc_stderr\": 0.03565998174135302,\n \"acc_norm\": 0.2413793103448276,\n \"acc_norm_stderr\": 0.03565998174135302\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.2671957671957672,\n \"acc_stderr\": 0.022789673145776564,\n \"acc_norm\": 0.2671957671957672,\n \"acc_norm_stderr\": 0.022789673145776564\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.36507936507936506,\n \"acc_stderr\": 0.04306241259127153,\n \"acc_norm\": 0.36507936507936506,\n \"acc_norm_stderr\": 0.04306241259127153\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.18,\n \"acc_stderr\": 0.03861229196653694,\n \"acc_norm\": 0.18,\n \"acc_norm_stderr\": 0.03861229196653694\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.3161290322580645,\n \"acc_stderr\": 0.02645087448904277,\n \"acc_norm\": 0.3161290322580645,\n \"acc_norm_stderr\": 0.02645087448904277\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.28078817733990147,\n \"acc_stderr\": 0.03161856335358609,\n \"acc_norm\": 0.28078817733990147,\n \"acc_norm_stderr\": 0.03161856335358609\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.19,\n \"acc_stderr\": 0.039427724440366234,\n \"acc_norm\": 0.19,\n \"acc_norm_stderr\": 0.039427724440366234\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.2545454545454545,\n \"acc_stderr\": 0.03401506715249039,\n \"acc_norm\": 0.2545454545454545,\n \"acc_norm_stderr\": 0.03401506715249039\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.35353535353535354,\n \"acc_stderr\": 0.03406086723547153,\n \"acc_norm\": 0.35353535353535354,\n \"acc_norm_stderr\": 0.03406086723547153\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.36787564766839376,\n \"acc_stderr\": 0.03480175668466036,\n \"acc_norm\": 0.36787564766839376,\n \"acc_norm_stderr\": 0.03480175668466036\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.3641025641025641,\n \"acc_stderr\": 0.02439667298509477,\n \"acc_norm\": 0.3641025641025641,\n \"acc_norm_stderr\": 0.02439667298509477\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.26296296296296295,\n \"acc_stderr\": 0.026842057873833706,\n \"acc_norm\": 0.26296296296296295,\n \"acc_norm_stderr\": 0.026842057873833706\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.3487394957983193,\n \"acc_stderr\": 0.03095663632856655,\n \"acc_norm\": 0.3487394957983193,\n \"acc_norm_stderr\": 0.03095663632856655\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.33112582781456956,\n \"acc_stderr\": 0.038425817186598696,\n \"acc_norm\": 0.33112582781456956,\n \"acc_norm_stderr\": 0.038425817186598696\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.3486238532110092,\n \"acc_stderr\": 0.020431254090714328,\n \"acc_norm\": 0.3486238532110092,\n \"acc_norm_stderr\": 0.020431254090714328\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.4722222222222222,\n \"acc_stderr\": 0.0340470532865388,\n \"acc_norm\": 0.4722222222222222,\n \"acc_norm_stderr\": 0.0340470532865388\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.2549019607843137,\n \"acc_stderr\": 0.030587591351604246,\n \"acc_norm\": 0.2549019607843137,\n \"acc_norm_stderr\": 0.030587591351604246\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.20253164556962025,\n \"acc_stderr\": 0.026160568246601457,\n \"acc_norm\": 0.20253164556962025,\n \"acc_norm_stderr\": 0.026160568246601457\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.10762331838565023,\n \"acc_stderr\": 0.020799400082879997,\n \"acc_norm\": 0.10762331838565023,\n \"acc_norm_stderr\": 0.020799400082879997\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.2824427480916031,\n \"acc_stderr\": 0.03948406125768361,\n \"acc_norm\": 0.2824427480916031,\n \"acc_norm_stderr\": 0.03948406125768361\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.14049586776859505,\n \"acc_stderr\": 0.03172233426002161,\n \"acc_norm\": 0.14049586776859505,\n \"acc_norm_stderr\": 0.03172233426002161\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.21296296296296297,\n \"acc_stderr\": 0.0395783547198098,\n \"acc_norm\": 0.21296296296296297,\n \"acc_norm_stderr\": 0.0395783547198098\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.2331288343558282,\n \"acc_stderr\": 0.033220157957767414,\n \"acc_norm\": 0.2331288343558282,\n \"acc_norm_stderr\": 0.033220157957767414\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.16071428571428573,\n \"acc_stderr\": 0.03485946096475741,\n \"acc_norm\": 0.16071428571428573,\n \"acc_norm_stderr\": 0.03485946096475741\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.3786407766990291,\n \"acc_stderr\": 0.04802694698258972,\n \"acc_norm\": 0.3786407766990291,\n \"acc_norm_stderr\": 0.04802694698258972\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.19658119658119658,\n \"acc_stderr\": 0.02603538609895129,\n \"acc_norm\": 0.19658119658119658,\n \"acc_norm_stderr\": 0.02603538609895129\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.24,\n \"acc_stderr\": 0.04292346959909281,\n \"acc_norm\": 0.24,\n \"acc_norm_stderr\": 0.04292346959909281\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.20434227330779056,\n \"acc_stderr\": 0.0144191239809319,\n \"acc_norm\": 0.20434227330779056,\n \"acc_norm_stderr\": 0.0144191239809319\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.2138728323699422,\n \"acc_stderr\": 0.022075709251757183,\n \"acc_norm\": 0.2138728323699422,\n \"acc_norm_stderr\": 0.022075709251757183\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.27262569832402234,\n \"acc_stderr\": 0.014893391735249588,\n \"acc_norm\": 0.27262569832402234,\n \"acc_norm_stderr\": 0.014893391735249588\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.29411764705882354,\n \"acc_stderr\": 0.02609016250427905,\n \"acc_norm\": 0.29411764705882354,\n \"acc_norm_stderr\": 0.02609016250427905\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.24115755627009647,\n \"acc_stderr\": 0.024296594034763426,\n \"acc_norm\": 0.24115755627009647,\n \"acc_norm_stderr\": 0.024296594034763426\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.22530864197530864,\n \"acc_stderr\": 0.023246202647819746,\n \"acc_norm\": 0.22530864197530864,\n \"acc_norm_stderr\": 0.023246202647819746\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.24113475177304963,\n \"acc_stderr\": 0.025518731049537762,\n \"acc_norm\": 0.24113475177304963,\n \"acc_norm_stderr\": 0.025518731049537762\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.24445893089960888,\n \"acc_stderr\": 0.010976425013113886,\n \"acc_norm\": 0.24445893089960888,\n \"acc_norm_stderr\": 0.010976425013113886\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.4485294117647059,\n \"acc_stderr\": 0.030211479609121593,\n \"acc_norm\": 0.4485294117647059,\n \"acc_norm_stderr\": 0.030211479609121593\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.2173202614379085,\n \"acc_stderr\": 0.01668482092914859,\n \"acc_norm\": 0.2173202614379085,\n \"acc_norm_stderr\": 0.01668482092914859\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.22727272727272727,\n \"acc_stderr\": 0.04013964554072774,\n \"acc_norm\": 0.22727272727272727,\n \"acc_norm_stderr\": 0.04013964554072774\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.4,\n \"acc_stderr\": 0.031362502409358936,\n \"acc_norm\": 0.4,\n \"acc_norm_stderr\": 0.031362502409358936\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.26865671641791045,\n \"acc_stderr\": 0.03134328358208954,\n \"acc_norm\": 0.26865671641791045,\n \"acc_norm_stderr\": 0.03134328358208954\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.26,\n \"acc_stderr\": 0.04408440022768078,\n \"acc_norm\": 0.26,\n \"acc_norm_stderr\": 0.04408440022768078\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.1927710843373494,\n \"acc_stderr\": 0.030709824050565274,\n \"acc_norm\": 0.1927710843373494,\n \"acc_norm_stderr\": 0.030709824050565274\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.17543859649122806,\n \"acc_stderr\": 0.029170885500727654,\n \"acc_norm\": 0.17543859649122806,\n \"acc_norm_stderr\": 0.029170885500727654\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.23623011015911874,\n \"mc1_stderr\": 0.014869755015871093,\n \"mc2\": 0.472672099464425,\n \"mc2_stderr\": 0.016538310174069905\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.4909234411996843,\n \"acc_stderr\": 0.014050170094497707\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.0,\n \"acc_stderr\": 0.0\n }\n}\n```", "repo_url": "https://huggingface.co/janhq/supermario-v1", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|arc:challenge|25_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|gsm8k|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hellaswag|10_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T03-51-54.393256.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["**/details_harness|winogrande|5_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-12T03-51-54.393256.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_12T03_51_54.393256", "path": ["results_2023-12-12T03-51-54.393256.parquet"]}, {"split": "latest", "path": ["results_2023-12-12T03-51-54.393256.parquet"]}]}]}
2023-12-12T03:55:52+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of janhq/supermario-v1 Dataset automatically created during the evaluation run of model janhq/supermario-v1 on the Open LLM Leaderboard. The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-12T03:51:54.393256(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Evaluation run of janhq/supermario-v1\n\n\n\nDataset automatically created during the evaluation run of model janhq/supermario-v1 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T03:51:54.393256(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of janhq/supermario-v1\n\n\n\nDataset automatically created during the evaluation run of model janhq/supermario-v1 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T03:51:54.393256(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 181, 66, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of janhq/supermario-v1\n\n\n\nDataset automatically created during the evaluation run of model janhq/supermario-v1 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-12T03:51:54.393256(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
ba8456cbfe5eee81e078bd3a63611dc9aa1a539c
# Dataset Card for Evaluation run of Fredithefish/MadMix-v0.1 <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [Fredithefish/MadMix-v0.1](https://huggingface.co/Fredithefish/MadMix-v0.1) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_Fredithefish__MadMix-v0.1", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-12T03:52:52.102581](https://huggingface.co/datasets/open-llm-leaderboard/details_Fredithefish__MadMix-v0.1/blob/main/results_2023-12-12T03-52-52.102581.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.643234247612221, "acc_stderr": 0.032109024933031964, "acc_norm": 0.6472045014183971, "acc_norm_stderr": 0.03274129253193244, "mc1": 0.3537331701346389, "mc1_stderr": 0.016737814358846147, "mc2": 0.510505741411582, "mc2_stderr": 0.015407290000571205 }, "harness|arc:challenge|25": { "acc": 0.621160409556314, "acc_stderr": 0.014175915490000326, "acc_norm": 0.6493174061433447, "acc_norm_stderr": 0.013944635930726097 }, "harness|hellaswag|10": { "acc": 0.6555467038438558, "acc_stderr": 0.00474218516926477, "acc_norm": 0.8436566421031667, "acc_norm_stderr": 0.003624383120823458 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.29, "acc_stderr": 0.045604802157206845, "acc_norm": 0.29, "acc_norm_stderr": 0.045604802157206845 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.6148148148148148, "acc_stderr": 0.04203921040156279, "acc_norm": 0.6148148148148148, "acc_norm_stderr": 0.04203921040156279 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.6842105263157895, "acc_stderr": 0.03782728980865469, "acc_norm": 0.6842105263157895, "acc_norm_stderr": 0.03782728980865469 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.63, "acc_stderr": 0.04852365870939099, "acc_norm": 0.63, "acc_norm_stderr": 0.04852365870939099 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.7018867924528301, "acc_stderr": 0.028152837942493857, "acc_norm": 0.7018867924528301, "acc_norm_stderr": 0.028152837942493857 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.7430555555555556, "acc_stderr": 0.03653946969442099, "acc_norm": 0.7430555555555556, "acc_norm_stderr": 0.03653946969442099 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.46, "acc_stderr": 0.05009082659620332, "acc_norm": 0.46, "acc_norm_stderr": 0.05009082659620332 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.5, "acc_stderr": 0.050251890762960605, "acc_norm": 0.5, "acc_norm_stderr": 0.050251890762960605 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.37, "acc_stderr": 0.048523658709391, "acc_norm": 0.37, "acc_norm_stderr": 0.048523658709391 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.6647398843930635, "acc_stderr": 0.03599586301247078, "acc_norm": 0.6647398843930635, "acc_norm_stderr": 0.03599586301247078 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.4215686274509804, "acc_stderr": 0.04913595201274498, "acc_norm": 0.4215686274509804, "acc_norm_stderr": 0.04913595201274498 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.75, "acc_stderr": 0.04351941398892446, "acc_norm": 0.75, "acc_norm_stderr": 0.04351941398892446 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.548936170212766, "acc_stderr": 0.032529096196131965, "acc_norm": 0.548936170212766, "acc_norm_stderr": 0.032529096196131965 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.47368421052631576, "acc_stderr": 0.04697085136647863, "acc_norm": 0.47368421052631576, "acc_norm_stderr": 0.04697085136647863 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.5862068965517241, "acc_stderr": 0.04104269211806232, "acc_norm": 0.5862068965517241, "acc_norm_stderr": 0.04104269211806232 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.4074074074074074, "acc_stderr": 0.02530590624159063, "acc_norm": 0.4074074074074074, "acc_norm_stderr": 0.02530590624159063 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.49206349206349204, "acc_stderr": 0.044715725362943486, "acc_norm": 0.49206349206349204, "acc_norm_stderr": 0.044715725362943486 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.3, "acc_stderr": 0.046056618647183814, "acc_norm": 0.3, "acc_norm_stderr": 0.046056618647183814 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.8032258064516129, "acc_stderr": 0.022616409420742025, "acc_norm": 0.8032258064516129, "acc_norm_stderr": 0.022616409420742025 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.5073891625615764, "acc_stderr": 0.035176035403610105, "acc_norm": 0.5073891625615764, "acc_norm_stderr": 0.035176035403610105 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.66, "acc_stderr": 0.04760952285695237, "acc_norm": 0.66, "acc_norm_stderr": 0.04760952285695237 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.7696969696969697, "acc_stderr": 0.032876667586034906, "acc_norm": 0.7696969696969697, "acc_norm_stderr": 0.032876667586034906 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.8080808080808081, "acc_stderr": 0.028057791672989017, "acc_norm": 0.8080808080808081, "acc_norm_stderr": 0.028057791672989017 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.9015544041450777, "acc_stderr": 0.02150024957603346, "acc_norm": 0.9015544041450777, "acc_norm_stderr": 0.02150024957603346 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.6615384615384615, "acc_stderr": 0.02399150050031304, "acc_norm": 0.6615384615384615, "acc_norm_stderr": 0.02399150050031304 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.337037037037037, "acc_stderr": 0.028820884666253255, "acc_norm": 0.337037037037037, "acc_norm_stderr": 0.028820884666253255 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.6848739495798319, "acc_stderr": 0.030176808288974337, "acc_norm": 0.6848739495798319, "acc_norm_stderr": 0.030176808288974337 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.33774834437086093, "acc_stderr": 0.03861557546255169, "acc_norm": 0.33774834437086093, "acc_norm_stderr": 0.03861557546255169 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.8422018348623853, "acc_stderr": 0.015630022970092423, "acc_norm": 0.8422018348623853, "acc_norm_stderr": 0.015630022970092423 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.5277777777777778, "acc_stderr": 0.0340470532865388, "acc_norm": 0.5277777777777778, "acc_norm_stderr": 0.0340470532865388 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.8235294117647058, "acc_stderr": 0.02675640153807897, "acc_norm": 0.8235294117647058, "acc_norm_stderr": 0.02675640153807897 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.8227848101265823, "acc_stderr": 0.024856364184503228, "acc_norm": 0.8227848101265823, "acc_norm_stderr": 0.024856364184503228 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.7174887892376681, "acc_stderr": 0.030216831011508766, "acc_norm": 0.7174887892376681, "acc_norm_stderr": 0.030216831011508766 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.7862595419847328, "acc_stderr": 0.0359546161177469, "acc_norm": 0.7862595419847328, "acc_norm_stderr": 0.0359546161177469 }, "harness|hendrycksTest-international_law|5": { "acc": 0.7933884297520661, "acc_stderr": 0.03695980128098824, "acc_norm": 0.7933884297520661, "acc_norm_stderr": 0.03695980128098824 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.7870370370370371, "acc_stderr": 0.039578354719809805, "acc_norm": 0.7870370370370371, "acc_norm_stderr": 0.039578354719809805 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.754601226993865, "acc_stderr": 0.03380939813943354, "acc_norm": 0.754601226993865, "acc_norm_stderr": 0.03380939813943354 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.48214285714285715, "acc_stderr": 0.047427623612430116, "acc_norm": 0.48214285714285715, "acc_norm_stderr": 0.047427623612430116 }, "harness|hendrycksTest-management|5": { "acc": 0.8252427184466019, "acc_stderr": 0.037601780060266196, "acc_norm": 0.8252427184466019, "acc_norm_stderr": 0.037601780060266196 }, "harness|hendrycksTest-marketing|5": { "acc": 0.8803418803418803, "acc_stderr": 0.021262719400406964, "acc_norm": 0.8803418803418803, "acc_norm_stderr": 0.021262719400406964 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.7, "acc_stderr": 0.046056618647183814, "acc_norm": 0.7, "acc_norm_stderr": 0.046056618647183814 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.8212005108556832, "acc_stderr": 0.013702643715368976, "acc_norm": 0.8212005108556832, "acc_norm_stderr": 0.013702643715368976 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.708092485549133, "acc_stderr": 0.02447699407624733, "acc_norm": 0.708092485549133, "acc_norm_stderr": 0.02447699407624733 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.38212290502793295, "acc_stderr": 0.01625113971157077, "acc_norm": 0.38212290502793295, "acc_norm_stderr": 0.01625113971157077 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.7418300653594772, "acc_stderr": 0.025058503316958143, "acc_norm": 0.7418300653594772, "acc_norm_stderr": 0.025058503316958143 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.6945337620578779, "acc_stderr": 0.026160584450140446, "acc_norm": 0.6945337620578779, "acc_norm_stderr": 0.026160584450140446 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.7067901234567902, "acc_stderr": 0.025329888171900926, "acc_norm": 0.7067901234567902, "acc_norm_stderr": 0.025329888171900926 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.46099290780141844, "acc_stderr": 0.029736592526424438, "acc_norm": 0.46099290780141844, "acc_norm_stderr": 0.029736592526424438 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.45371577574967403, "acc_stderr": 0.01271540484127774, "acc_norm": 0.45371577574967403, "acc_norm_stderr": 0.01271540484127774 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.7058823529411765, "acc_stderr": 0.02767846864214472, "acc_norm": 0.7058823529411765, "acc_norm_stderr": 0.02767846864214472 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.6650326797385621, "acc_stderr": 0.019094228167000325, "acc_norm": 0.6650326797385621, "acc_norm_stderr": 0.019094228167000325 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.6636363636363637, "acc_stderr": 0.04525393596302506, "acc_norm": 0.6636363636363637, "acc_norm_stderr": 0.04525393596302506 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.7510204081632653, "acc_stderr": 0.027682979522960238, "acc_norm": 0.7510204081632653, "acc_norm_stderr": 0.027682979522960238 }, "harness|hendrycksTest-sociology|5": { "acc": 0.845771144278607, "acc_stderr": 0.025538433368578327, "acc_norm": 0.845771144278607, "acc_norm_stderr": 0.025538433368578327 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.86, "acc_stderr": 0.03487350880197769, "acc_norm": 0.86, "acc_norm_stderr": 0.03487350880197769 }, "harness|hendrycksTest-virology|5": { "acc": 0.5602409638554217, "acc_stderr": 0.03864139923699122, "acc_norm": 0.5602409638554217, "acc_norm_stderr": 0.03864139923699122 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.8187134502923976, "acc_stderr": 0.029547741687640038, "acc_norm": 0.8187134502923976, "acc_norm_stderr": 0.029547741687640038 }, "harness|truthfulqa:mc|0": { "mc1": 0.3537331701346389, "mc1_stderr": 0.016737814358846147, "mc2": 0.510505741411582, "mc2_stderr": 0.015407290000571205 }, "harness|winogrande|5": { "acc": 0.7719021310181531, "acc_stderr": 0.011793015817663588 }, "harness|gsm8k|5": { "acc": 0.4965883244882487, "acc_stderr": 0.013772164105556747 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
open-llm-leaderboard/details_Fredithefish__MadMix-v0.1
[ "region:us" ]
2023-12-12T03:55:48+00:00
{"pretty_name": "Evaluation run of Fredithefish/MadMix-v0.1", "dataset_summary": "Dataset automatically created during the evaluation run of model [Fredithefish/MadMix-v0.1](https://huggingface.co/Fredithefish/MadMix-v0.1) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_Fredithefish__MadMix-v0.1\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-12T03:52:52.102581](https://huggingface.co/datasets/open-llm-leaderboard/details_Fredithefish__MadMix-v0.1/blob/main/results_2023-12-12T03-52-52.102581.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.643234247612221,\n \"acc_stderr\": 0.032109024933031964,\n \"acc_norm\": 0.6472045014183971,\n \"acc_norm_stderr\": 0.03274129253193244,\n \"mc1\": 0.3537331701346389,\n \"mc1_stderr\": 0.016737814358846147,\n \"mc2\": 0.510505741411582,\n \"mc2_stderr\": 0.015407290000571205\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.621160409556314,\n \"acc_stderr\": 0.014175915490000326,\n \"acc_norm\": 0.6493174061433447,\n \"acc_norm_stderr\": 0.013944635930726097\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6555467038438558,\n \"acc_stderr\": 0.00474218516926477,\n \"acc_norm\": 0.8436566421031667,\n \"acc_norm_stderr\": 0.003624383120823458\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.29,\n \"acc_stderr\": 0.045604802157206845,\n \"acc_norm\": 0.29,\n \"acc_norm_stderr\": 0.045604802157206845\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.6148148148148148,\n \"acc_stderr\": 0.04203921040156279,\n \"acc_norm\": 0.6148148148148148,\n \"acc_norm_stderr\": 0.04203921040156279\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.6842105263157895,\n \"acc_stderr\": 0.03782728980865469,\n \"acc_norm\": 0.6842105263157895,\n \"acc_norm_stderr\": 0.03782728980865469\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.63,\n \"acc_stderr\": 0.04852365870939099,\n \"acc_norm\": 0.63,\n \"acc_norm_stderr\": 0.04852365870939099\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.7018867924528301,\n \"acc_stderr\": 0.028152837942493857,\n \"acc_norm\": 0.7018867924528301,\n \"acc_norm_stderr\": 0.028152837942493857\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.7430555555555556,\n \"acc_stderr\": 0.03653946969442099,\n \"acc_norm\": 0.7430555555555556,\n \"acc_norm_stderr\": 0.03653946969442099\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.46,\n \"acc_stderr\": 0.05009082659620332,\n \"acc_norm\": 0.46,\n \"acc_norm_stderr\": 0.05009082659620332\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.5,\n \"acc_stderr\": 0.050251890762960605,\n \"acc_norm\": 0.5,\n \"acc_norm_stderr\": 0.050251890762960605\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.37,\n \"acc_stderr\": 0.048523658709391,\n \"acc_norm\": 0.37,\n \"acc_norm_stderr\": 0.048523658709391\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.6647398843930635,\n \"acc_stderr\": 0.03599586301247078,\n \"acc_norm\": 0.6647398843930635,\n \"acc_norm_stderr\": 0.03599586301247078\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.4215686274509804,\n \"acc_stderr\": 0.04913595201274498,\n \"acc_norm\": 0.4215686274509804,\n \"acc_norm_stderr\": 0.04913595201274498\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.75,\n \"acc_stderr\": 0.04351941398892446,\n \"acc_norm\": 0.75,\n \"acc_norm_stderr\": 0.04351941398892446\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.548936170212766,\n \"acc_stderr\": 0.032529096196131965,\n \"acc_norm\": 0.548936170212766,\n \"acc_norm_stderr\": 0.032529096196131965\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.47368421052631576,\n \"acc_stderr\": 0.04697085136647863,\n \"acc_norm\": 0.47368421052631576,\n \"acc_norm_stderr\": 0.04697085136647863\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.5862068965517241,\n \"acc_stderr\": 0.04104269211806232,\n \"acc_norm\": 0.5862068965517241,\n \"acc_norm_stderr\": 0.04104269211806232\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.4074074074074074,\n \"acc_stderr\": 0.02530590624159063,\n \"acc_norm\": 0.4074074074074074,\n \"acc_norm_stderr\": 0.02530590624159063\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.49206349206349204,\n \"acc_stderr\": 0.044715725362943486,\n \"acc_norm\": 0.49206349206349204,\n \"acc_norm_stderr\": 0.044715725362943486\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.3,\n \"acc_stderr\": 0.046056618647183814,\n \"acc_norm\": 0.3,\n \"acc_norm_stderr\": 0.046056618647183814\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.8032258064516129,\n \"acc_stderr\": 0.022616409420742025,\n \"acc_norm\": 0.8032258064516129,\n \"acc_norm_stderr\": 0.022616409420742025\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.5073891625615764,\n \"acc_stderr\": 0.035176035403610105,\n \"acc_norm\": 0.5073891625615764,\n \"acc_norm_stderr\": 0.035176035403610105\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.66,\n \"acc_stderr\": 0.04760952285695237,\n \"acc_norm\": 0.66,\n \"acc_norm_stderr\": 0.04760952285695237\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.7696969696969697,\n \"acc_stderr\": 0.032876667586034906,\n \"acc_norm\": 0.7696969696969697,\n \"acc_norm_stderr\": 0.032876667586034906\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.8080808080808081,\n \"acc_stderr\": 0.028057791672989017,\n \"acc_norm\": 0.8080808080808081,\n \"acc_norm_stderr\": 0.028057791672989017\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.9015544041450777,\n \"acc_stderr\": 0.02150024957603346,\n \"acc_norm\": 0.9015544041450777,\n \"acc_norm_stderr\": 0.02150024957603346\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.6615384615384615,\n \"acc_stderr\": 0.02399150050031304,\n \"acc_norm\": 0.6615384615384615,\n \"acc_norm_stderr\": 0.02399150050031304\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.337037037037037,\n \"acc_stderr\": 0.028820884666253255,\n \"acc_norm\": 0.337037037037037,\n \"acc_norm_stderr\": 0.028820884666253255\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.6848739495798319,\n \"acc_stderr\": 0.030176808288974337,\n \"acc_norm\": 0.6848739495798319,\n \"acc_norm_stderr\": 0.030176808288974337\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.33774834437086093,\n \"acc_stderr\": 0.03861557546255169,\n \"acc_norm\": 0.33774834437086093,\n \"acc_norm_stderr\": 0.03861557546255169\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.8422018348623853,\n \"acc_stderr\": 0.015630022970092423,\n \"acc_norm\": 0.8422018348623853,\n \"acc_norm_stderr\": 0.015630022970092423\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.5277777777777778,\n \"acc_stderr\": 0.0340470532865388,\n \"acc_norm\": 0.5277777777777778,\n \"acc_norm_stderr\": 0.0340470532865388\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.8235294117647058,\n \"acc_stderr\": 0.02675640153807897,\n \"acc_norm\": 0.8235294117647058,\n \"acc_norm_stderr\": 0.02675640153807897\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.8227848101265823,\n \"acc_stderr\": 0.024856364184503228,\n \"acc_norm\": 0.8227848101265823,\n \"acc_norm_stderr\": 0.024856364184503228\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.7174887892376681,\n \"acc_stderr\": 0.030216831011508766,\n \"acc_norm\": 0.7174887892376681,\n \"acc_norm_stderr\": 0.030216831011508766\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.7862595419847328,\n \"acc_stderr\": 0.0359546161177469,\n \"acc_norm\": 0.7862595419847328,\n \"acc_norm_stderr\": 0.0359546161177469\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.7933884297520661,\n \"acc_stderr\": 0.03695980128098824,\n \"acc_norm\": 0.7933884297520661,\n \"acc_norm_stderr\": 0.03695980128098824\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.7870370370370371,\n \"acc_stderr\": 0.039578354719809805,\n \"acc_norm\": 0.7870370370370371,\n \"acc_norm_stderr\": 0.039578354719809805\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.754601226993865,\n \"acc_stderr\": 0.03380939813943354,\n \"acc_norm\": 0.754601226993865,\n \"acc_norm_stderr\": 0.03380939813943354\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.48214285714285715,\n \"acc_stderr\": 0.047427623612430116,\n \"acc_norm\": 0.48214285714285715,\n \"acc_norm_stderr\": 0.047427623612430116\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.8252427184466019,\n \"acc_stderr\": 0.037601780060266196,\n \"acc_norm\": 0.8252427184466019,\n \"acc_norm_stderr\": 0.037601780060266196\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.8803418803418803,\n \"acc_stderr\": 0.021262719400406964,\n \"acc_norm\": 0.8803418803418803,\n \"acc_norm_stderr\": 0.021262719400406964\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.7,\n \"acc_stderr\": 0.046056618647183814,\n \"acc_norm\": 0.7,\n \"acc_norm_stderr\": 0.046056618647183814\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.8212005108556832,\n \"acc_stderr\": 0.013702643715368976,\n \"acc_norm\": 0.8212005108556832,\n \"acc_norm_stderr\": 0.013702643715368976\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.708092485549133,\n \"acc_stderr\": 0.02447699407624733,\n \"acc_norm\": 0.708092485549133,\n \"acc_norm_stderr\": 0.02447699407624733\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.38212290502793295,\n \"acc_stderr\": 0.01625113971157077,\n \"acc_norm\": 0.38212290502793295,\n \"acc_norm_stderr\": 0.01625113971157077\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.7418300653594772,\n \"acc_stderr\": 0.025058503316958143,\n \"acc_norm\": 0.7418300653594772,\n \"acc_norm_stderr\": 0.025058503316958143\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.6945337620578779,\n \"acc_stderr\": 0.026160584450140446,\n \"acc_norm\": 0.6945337620578779,\n \"acc_norm_stderr\": 0.026160584450140446\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.7067901234567902,\n \"acc_stderr\": 0.025329888171900926,\n \"acc_norm\": 0.7067901234567902,\n \"acc_norm_stderr\": 0.025329888171900926\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.46099290780141844,\n \"acc_stderr\": 0.029736592526424438,\n \"acc_norm\": 0.46099290780141844,\n \"acc_norm_stderr\": 0.029736592526424438\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.45371577574967403,\n \"acc_stderr\": 0.01271540484127774,\n \"acc_norm\": 0.45371577574967403,\n \"acc_norm_stderr\": 0.01271540484127774\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.7058823529411765,\n \"acc_stderr\": 0.02767846864214472,\n \"acc_norm\": 0.7058823529411765,\n \"acc_norm_stderr\": 0.02767846864214472\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.6650326797385621,\n \"acc_stderr\": 0.019094228167000325,\n \"acc_norm\": 0.6650326797385621,\n \"acc_norm_stderr\": 0.019094228167000325\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.6636363636363637,\n \"acc_stderr\": 0.04525393596302506,\n \"acc_norm\": 0.6636363636363637,\n \"acc_norm_stderr\": 0.04525393596302506\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.7510204081632653,\n \"acc_stderr\": 0.027682979522960238,\n \"acc_norm\": 0.7510204081632653,\n \"acc_norm_stderr\": 0.027682979522960238\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.845771144278607,\n \"acc_stderr\": 0.025538433368578327,\n \"acc_norm\": 0.845771144278607,\n \"acc_norm_stderr\": 0.025538433368578327\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.86,\n \"acc_stderr\": 0.03487350880197769,\n \"acc_norm\": 0.86,\n \"acc_norm_stderr\": 0.03487350880197769\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.5602409638554217,\n \"acc_stderr\": 0.03864139923699122,\n \"acc_norm\": 0.5602409638554217,\n \"acc_norm_stderr\": 0.03864139923699122\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.8187134502923976,\n \"acc_stderr\": 0.029547741687640038,\n \"acc_norm\": 0.8187134502923976,\n \"acc_norm_stderr\": 0.029547741687640038\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.3537331701346389,\n \"mc1_stderr\": 0.016737814358846147,\n \"mc2\": 0.510505741411582,\n \"mc2_stderr\": 0.015407290000571205\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7719021310181531,\n \"acc_stderr\": 0.011793015817663588\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.4965883244882487,\n \"acc_stderr\": 0.013772164105556747\n }\n}\n```", "repo_url": "https://huggingface.co/Fredithefish/MadMix-v0.1", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|arc:challenge|25_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|gsm8k|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hellaswag|10_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T03-52-52.102581.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["**/details_harness|winogrande|5_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-12T03-52-52.102581.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_12T03_52_52.102581", "path": ["results_2023-12-12T03-52-52.102581.parquet"]}, {"split": "latest", "path": ["results_2023-12-12T03-52-52.102581.parquet"]}]}]}
2023-12-12T03:56:35+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of Fredithefish/MadMix-v0.1 Dataset automatically created during the evaluation run of model Fredithefish/MadMix-v0.1 on the Open LLM Leaderboard. The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-12T03:52:52.102581(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Evaluation run of Fredithefish/MadMix-v0.1\n\n\n\nDataset automatically created during the evaluation run of model Fredithefish/MadMix-v0.1 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T03:52:52.102581(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of Fredithefish/MadMix-v0.1\n\n\n\nDataset automatically created during the evaluation run of model Fredithefish/MadMix-v0.1 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T03:52:52.102581(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 181, 67, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of Fredithefish/MadMix-v0.1\n\n\n\nDataset automatically created during the evaluation run of model Fredithefish/MadMix-v0.1 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-12T03:52:52.102581(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
b98824e699b6bbde78e034e52fa0b0e5965d0721
# Dataset Card for Evaluation run of rwitz2/ipo-test <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [rwitz2/ipo-test](https://huggingface.co/rwitz2/ipo-test) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_rwitz2__ipo-test", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-12T03:53:21.138621](https://huggingface.co/datasets/open-llm-leaderboard/details_rwitz2__ipo-test/blob/main/results_2023-12-12T03-53-21.138621.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.6543450273126857, "acc_stderr": 0.03191864171781636, "acc_norm": 0.6545137141283983, "acc_norm_stderr": 0.03257628315307556, "mc1": 0.39167686658506734, "mc1_stderr": 0.01708779588176963, "mc2": 0.558695592929387, "mc2_stderr": 0.015276769304708891 }, "harness|arc:challenge|25": { "acc": 0.6390784982935154, "acc_stderr": 0.014034761386175456, "acc_norm": 0.6791808873720137, "acc_norm_stderr": 0.013640943091946533 }, "harness|hellaswag|10": { "acc": 0.6694881497709619, "acc_stderr": 0.004694360968929403, "acc_norm": 0.8598884684325832, "acc_norm_stderr": 0.003463933286063885 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.33, "acc_stderr": 0.04725815626252605, "acc_norm": 0.33, "acc_norm_stderr": 0.04725815626252605 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.6370370370370371, "acc_stderr": 0.04153948404742398, "acc_norm": 0.6370370370370371, "acc_norm_stderr": 0.04153948404742398 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.6907894736842105, "acc_stderr": 0.03761070869867479, "acc_norm": 0.6907894736842105, "acc_norm_stderr": 0.03761070869867479 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.62, "acc_stderr": 0.048783173121456316, "acc_norm": 0.62, "acc_norm_stderr": 0.048783173121456316 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.7320754716981132, "acc_stderr": 0.027257260322494845, "acc_norm": 0.7320754716981132, "acc_norm_stderr": 0.027257260322494845 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.7569444444444444, "acc_stderr": 0.035868792800803406, "acc_norm": 0.7569444444444444, "acc_norm_stderr": 0.035868792800803406 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.47, "acc_stderr": 0.050161355804659205, "acc_norm": 0.47, "acc_norm_stderr": 0.050161355804659205 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.55, "acc_stderr": 0.05, "acc_norm": 0.55, "acc_norm_stderr": 0.05 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.33, "acc_stderr": 0.04725815626252604, "acc_norm": 0.33, "acc_norm_stderr": 0.04725815626252604 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.6763005780346821, "acc_stderr": 0.035676037996391706, "acc_norm": 0.6763005780346821, "acc_norm_stderr": 0.035676037996391706 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.43137254901960786, "acc_stderr": 0.04928099597287534, "acc_norm": 0.43137254901960786, "acc_norm_stderr": 0.04928099597287534 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.79, "acc_stderr": 0.04093601807403326, "acc_norm": 0.79, "acc_norm_stderr": 0.04093601807403326 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.6085106382978723, "acc_stderr": 0.03190701242326812, "acc_norm": 0.6085106382978723, "acc_norm_stderr": 0.03190701242326812 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.49122807017543857, "acc_stderr": 0.047028804320496165, "acc_norm": 0.49122807017543857, "acc_norm_stderr": 0.047028804320496165 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.5517241379310345, "acc_stderr": 0.04144311810878152, "acc_norm": 0.5517241379310345, "acc_norm_stderr": 0.04144311810878152 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.43915343915343913, "acc_stderr": 0.025559920550531003, "acc_norm": 0.43915343915343913, "acc_norm_stderr": 0.025559920550531003 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.49206349206349204, "acc_stderr": 0.044715725362943486, "acc_norm": 0.49206349206349204, "acc_norm_stderr": 0.044715725362943486 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.31, "acc_stderr": 0.04648231987117316, "acc_norm": 0.31, "acc_norm_stderr": 0.04648231987117316 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.7774193548387097, "acc_stderr": 0.023664216671642518, "acc_norm": 0.7774193548387097, "acc_norm_stderr": 0.023664216671642518 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.5123152709359606, "acc_stderr": 0.035169204442208966, "acc_norm": 0.5123152709359606, "acc_norm_stderr": 0.035169204442208966 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.69, "acc_stderr": 0.04648231987117316, "acc_norm": 0.69, "acc_norm_stderr": 0.04648231987117316 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.7818181818181819, "acc_stderr": 0.03225078108306289, "acc_norm": 0.7818181818181819, "acc_norm_stderr": 0.03225078108306289 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.7777777777777778, "acc_stderr": 0.029620227874790482, "acc_norm": 0.7777777777777778, "acc_norm_stderr": 0.029620227874790482 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.9119170984455959, "acc_stderr": 0.02045374660160103, "acc_norm": 0.9119170984455959, "acc_norm_stderr": 0.02045374660160103 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.6717948717948717, "acc_stderr": 0.023807633198657266, "acc_norm": 0.6717948717948717, "acc_norm_stderr": 0.023807633198657266 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.362962962962963, "acc_stderr": 0.02931820364520686, "acc_norm": 0.362962962962963, "acc_norm_stderr": 0.02931820364520686 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.680672268907563, "acc_stderr": 0.0302839955258844, "acc_norm": 0.680672268907563, "acc_norm_stderr": 0.0302839955258844 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.32450331125827814, "acc_stderr": 0.03822746937658752, "acc_norm": 0.32450331125827814, "acc_norm_stderr": 0.03822746937658752 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.8458715596330275, "acc_stderr": 0.015480826865374303, "acc_norm": 0.8458715596330275, "acc_norm_stderr": 0.015480826865374303 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.5370370370370371, "acc_stderr": 0.03400603625538272, "acc_norm": 0.5370370370370371, "acc_norm_stderr": 0.03400603625538272 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.8284313725490197, "acc_stderr": 0.02646056956124064, "acc_norm": 0.8284313725490197, "acc_norm_stderr": 0.02646056956124064 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.8059071729957806, "acc_stderr": 0.025744902532290895, "acc_norm": 0.8059071729957806, "acc_norm_stderr": 0.025744902532290895 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.6860986547085202, "acc_stderr": 0.031146796482972465, "acc_norm": 0.6860986547085202, "acc_norm_stderr": 0.031146796482972465 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.7786259541984732, "acc_stderr": 0.03641297081313729, "acc_norm": 0.7786259541984732, "acc_norm_stderr": 0.03641297081313729 }, "harness|hendrycksTest-international_law|5": { "acc": 0.8016528925619835, "acc_stderr": 0.03640118271990947, "acc_norm": 0.8016528925619835, "acc_norm_stderr": 0.03640118271990947 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.8333333333333334, "acc_stderr": 0.036028141763926456, "acc_norm": 0.8333333333333334, "acc_norm_stderr": 0.036028141763926456 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.7668711656441718, "acc_stderr": 0.0332201579577674, "acc_norm": 0.7668711656441718, "acc_norm_stderr": 0.0332201579577674 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.44642857142857145, "acc_stderr": 0.04718471485219588, "acc_norm": 0.44642857142857145, "acc_norm_stderr": 0.04718471485219588 }, "harness|hendrycksTest-management|5": { "acc": 0.8058252427184466, "acc_stderr": 0.039166677628225836, "acc_norm": 0.8058252427184466, "acc_norm_stderr": 0.039166677628225836 }, "harness|hendrycksTest-marketing|5": { "acc": 0.8846153846153846, "acc_stderr": 0.02093019318517933, "acc_norm": 0.8846153846153846, "acc_norm_stderr": 0.02093019318517933 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.74, "acc_stderr": 0.04408440022768078, "acc_norm": 0.74, "acc_norm_stderr": 0.04408440022768078 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.8301404853128991, "acc_stderr": 0.013428186370608308, "acc_norm": 0.8301404853128991, "acc_norm_stderr": 0.013428186370608308 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.7427745664739884, "acc_stderr": 0.023532925431044287, "acc_norm": 0.7427745664739884, "acc_norm_stderr": 0.023532925431044287 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.4212290502793296, "acc_stderr": 0.01651367603117959, "acc_norm": 0.4212290502793296, "acc_norm_stderr": 0.01651367603117959 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.7287581699346405, "acc_stderr": 0.02545775669666788, "acc_norm": 0.7287581699346405, "acc_norm_stderr": 0.02545775669666788 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.7041800643086816, "acc_stderr": 0.025922371788818767, "acc_norm": 0.7041800643086816, "acc_norm_stderr": 0.025922371788818767 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.7469135802469136, "acc_stderr": 0.024191808600712995, "acc_norm": 0.7469135802469136, "acc_norm_stderr": 0.024191808600712995 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.46808510638297873, "acc_stderr": 0.029766675075873866, "acc_norm": 0.46808510638297873, "acc_norm_stderr": 0.029766675075873866 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.4680573663624511, "acc_stderr": 0.012744149704869647, "acc_norm": 0.4680573663624511, "acc_norm_stderr": 0.012744149704869647 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.6727941176470589, "acc_stderr": 0.028501452860396556, "acc_norm": 0.6727941176470589, "acc_norm_stderr": 0.028501452860396556 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.6699346405228758, "acc_stderr": 0.019023726160724553, "acc_norm": 0.6699346405228758, "acc_norm_stderr": 0.019023726160724553 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.6545454545454545, "acc_stderr": 0.04554619617541054, "acc_norm": 0.6545454545454545, "acc_norm_stderr": 0.04554619617541054 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.7224489795918367, "acc_stderr": 0.02866685779027465, "acc_norm": 0.7224489795918367, "acc_norm_stderr": 0.02866685779027465 }, "harness|hendrycksTest-sociology|5": { "acc": 0.8557213930348259, "acc_stderr": 0.024845753212306053, "acc_norm": 0.8557213930348259, "acc_norm_stderr": 0.024845753212306053 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.86, "acc_stderr": 0.03487350880197769, "acc_norm": 0.86, "acc_norm_stderr": 0.03487350880197769 }, "harness|hendrycksTest-virology|5": { "acc": 0.5421686746987951, "acc_stderr": 0.0387862677100236, "acc_norm": 0.5421686746987951, "acc_norm_stderr": 0.0387862677100236 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.8304093567251462, "acc_stderr": 0.02878210810540171, "acc_norm": 0.8304093567251462, "acc_norm_stderr": 0.02878210810540171 }, "harness|truthfulqa:mc|0": { "mc1": 0.39167686658506734, "mc1_stderr": 0.01708779588176963, "mc2": 0.558695592929387, "mc2_stderr": 0.015276769304708891 }, "harness|winogrande|5": { "acc": 0.8089976322020521, "acc_stderr": 0.011047808761510427 }, "harness|gsm8k|5": { "acc": 0.7202426080363912, "acc_stderr": 0.012364384016735319 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
open-llm-leaderboard/details_rwitz2__ipo-test
[ "region:us" ]
2023-12-12T03:56:24+00:00
{"pretty_name": "Evaluation run of rwitz2/ipo-test", "dataset_summary": "Dataset automatically created during the evaluation run of model [rwitz2/ipo-test](https://huggingface.co/rwitz2/ipo-test) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_rwitz2__ipo-test\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-12T03:53:21.138621](https://huggingface.co/datasets/open-llm-leaderboard/details_rwitz2__ipo-test/blob/main/results_2023-12-12T03-53-21.138621.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.6543450273126857,\n \"acc_stderr\": 0.03191864171781636,\n \"acc_norm\": 0.6545137141283983,\n \"acc_norm_stderr\": 0.03257628315307556,\n \"mc1\": 0.39167686658506734,\n \"mc1_stderr\": 0.01708779588176963,\n \"mc2\": 0.558695592929387,\n \"mc2_stderr\": 0.015276769304708891\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.6390784982935154,\n \"acc_stderr\": 0.014034761386175456,\n \"acc_norm\": 0.6791808873720137,\n \"acc_norm_stderr\": 0.013640943091946533\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6694881497709619,\n \"acc_stderr\": 0.004694360968929403,\n \"acc_norm\": 0.8598884684325832,\n \"acc_norm_stderr\": 0.003463933286063885\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.33,\n \"acc_stderr\": 0.04725815626252605,\n \"acc_norm\": 0.33,\n \"acc_norm_stderr\": 0.04725815626252605\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.6370370370370371,\n \"acc_stderr\": 0.04153948404742398,\n \"acc_norm\": 0.6370370370370371,\n \"acc_norm_stderr\": 0.04153948404742398\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.6907894736842105,\n \"acc_stderr\": 0.03761070869867479,\n \"acc_norm\": 0.6907894736842105,\n \"acc_norm_stderr\": 0.03761070869867479\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.62,\n \"acc_stderr\": 0.048783173121456316,\n \"acc_norm\": 0.62,\n \"acc_norm_stderr\": 0.048783173121456316\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.7320754716981132,\n \"acc_stderr\": 0.027257260322494845,\n \"acc_norm\": 0.7320754716981132,\n \"acc_norm_stderr\": 0.027257260322494845\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.7569444444444444,\n \"acc_stderr\": 0.035868792800803406,\n \"acc_norm\": 0.7569444444444444,\n \"acc_norm_stderr\": 0.035868792800803406\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.47,\n \"acc_stderr\": 0.050161355804659205,\n \"acc_norm\": 0.47,\n \"acc_norm_stderr\": 0.050161355804659205\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.55,\n \"acc_stderr\": 0.05,\n \"acc_norm\": 0.55,\n \"acc_norm_stderr\": 0.05\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.33,\n \"acc_stderr\": 0.04725815626252604,\n \"acc_norm\": 0.33,\n \"acc_norm_stderr\": 0.04725815626252604\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.6763005780346821,\n \"acc_stderr\": 0.035676037996391706,\n \"acc_norm\": 0.6763005780346821,\n \"acc_norm_stderr\": 0.035676037996391706\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.43137254901960786,\n \"acc_stderr\": 0.04928099597287534,\n \"acc_norm\": 0.43137254901960786,\n \"acc_norm_stderr\": 0.04928099597287534\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.79,\n \"acc_stderr\": 0.04093601807403326,\n \"acc_norm\": 0.79,\n \"acc_norm_stderr\": 0.04093601807403326\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.6085106382978723,\n \"acc_stderr\": 0.03190701242326812,\n \"acc_norm\": 0.6085106382978723,\n \"acc_norm_stderr\": 0.03190701242326812\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.49122807017543857,\n \"acc_stderr\": 0.047028804320496165,\n \"acc_norm\": 0.49122807017543857,\n \"acc_norm_stderr\": 0.047028804320496165\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.5517241379310345,\n \"acc_stderr\": 0.04144311810878152,\n \"acc_norm\": 0.5517241379310345,\n \"acc_norm_stderr\": 0.04144311810878152\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.43915343915343913,\n \"acc_stderr\": 0.025559920550531003,\n \"acc_norm\": 0.43915343915343913,\n \"acc_norm_stderr\": 0.025559920550531003\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.49206349206349204,\n \"acc_stderr\": 0.044715725362943486,\n \"acc_norm\": 0.49206349206349204,\n \"acc_norm_stderr\": 0.044715725362943486\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.31,\n \"acc_stderr\": 0.04648231987117316,\n \"acc_norm\": 0.31,\n \"acc_norm_stderr\": 0.04648231987117316\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.7774193548387097,\n \"acc_stderr\": 0.023664216671642518,\n \"acc_norm\": 0.7774193548387097,\n \"acc_norm_stderr\": 0.023664216671642518\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.5123152709359606,\n \"acc_stderr\": 0.035169204442208966,\n \"acc_norm\": 0.5123152709359606,\n \"acc_norm_stderr\": 0.035169204442208966\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.69,\n \"acc_stderr\": 0.04648231987117316,\n \"acc_norm\": 0.69,\n \"acc_norm_stderr\": 0.04648231987117316\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.7818181818181819,\n \"acc_stderr\": 0.03225078108306289,\n \"acc_norm\": 0.7818181818181819,\n \"acc_norm_stderr\": 0.03225078108306289\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.7777777777777778,\n \"acc_stderr\": 0.029620227874790482,\n \"acc_norm\": 0.7777777777777778,\n \"acc_norm_stderr\": 0.029620227874790482\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.9119170984455959,\n \"acc_stderr\": 0.02045374660160103,\n \"acc_norm\": 0.9119170984455959,\n \"acc_norm_stderr\": 0.02045374660160103\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.6717948717948717,\n \"acc_stderr\": 0.023807633198657266,\n \"acc_norm\": 0.6717948717948717,\n \"acc_norm_stderr\": 0.023807633198657266\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.362962962962963,\n \"acc_stderr\": 0.02931820364520686,\n \"acc_norm\": 0.362962962962963,\n \"acc_norm_stderr\": 0.02931820364520686\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.680672268907563,\n \"acc_stderr\": 0.0302839955258844,\n \"acc_norm\": 0.680672268907563,\n \"acc_norm_stderr\": 0.0302839955258844\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.32450331125827814,\n \"acc_stderr\": 0.03822746937658752,\n \"acc_norm\": 0.32450331125827814,\n \"acc_norm_stderr\": 0.03822746937658752\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.8458715596330275,\n \"acc_stderr\": 0.015480826865374303,\n \"acc_norm\": 0.8458715596330275,\n \"acc_norm_stderr\": 0.015480826865374303\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.5370370370370371,\n \"acc_stderr\": 0.03400603625538272,\n \"acc_norm\": 0.5370370370370371,\n \"acc_norm_stderr\": 0.03400603625538272\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.8284313725490197,\n \"acc_stderr\": 0.02646056956124064,\n \"acc_norm\": 0.8284313725490197,\n \"acc_norm_stderr\": 0.02646056956124064\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.8059071729957806,\n \"acc_stderr\": 0.025744902532290895,\n \"acc_norm\": 0.8059071729957806,\n \"acc_norm_stderr\": 0.025744902532290895\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.6860986547085202,\n \"acc_stderr\": 0.031146796482972465,\n \"acc_norm\": 0.6860986547085202,\n \"acc_norm_stderr\": 0.031146796482972465\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.7786259541984732,\n \"acc_stderr\": 0.03641297081313729,\n \"acc_norm\": 0.7786259541984732,\n \"acc_norm_stderr\": 0.03641297081313729\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.8016528925619835,\n \"acc_stderr\": 0.03640118271990947,\n \"acc_norm\": 0.8016528925619835,\n \"acc_norm_stderr\": 0.03640118271990947\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.8333333333333334,\n \"acc_stderr\": 0.036028141763926456,\n \"acc_norm\": 0.8333333333333334,\n \"acc_norm_stderr\": 0.036028141763926456\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.7668711656441718,\n \"acc_stderr\": 0.0332201579577674,\n \"acc_norm\": 0.7668711656441718,\n \"acc_norm_stderr\": 0.0332201579577674\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.44642857142857145,\n \"acc_stderr\": 0.04718471485219588,\n \"acc_norm\": 0.44642857142857145,\n \"acc_norm_stderr\": 0.04718471485219588\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.8058252427184466,\n \"acc_stderr\": 0.039166677628225836,\n \"acc_norm\": 0.8058252427184466,\n \"acc_norm_stderr\": 0.039166677628225836\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.8846153846153846,\n \"acc_stderr\": 0.02093019318517933,\n \"acc_norm\": 0.8846153846153846,\n \"acc_norm_stderr\": 0.02093019318517933\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.74,\n \"acc_stderr\": 0.04408440022768078,\n \"acc_norm\": 0.74,\n \"acc_norm_stderr\": 0.04408440022768078\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.8301404853128991,\n \"acc_stderr\": 0.013428186370608308,\n \"acc_norm\": 0.8301404853128991,\n \"acc_norm_stderr\": 0.013428186370608308\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.7427745664739884,\n \"acc_stderr\": 0.023532925431044287,\n \"acc_norm\": 0.7427745664739884,\n \"acc_norm_stderr\": 0.023532925431044287\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.4212290502793296,\n \"acc_stderr\": 0.01651367603117959,\n \"acc_norm\": 0.4212290502793296,\n \"acc_norm_stderr\": 0.01651367603117959\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.7287581699346405,\n \"acc_stderr\": 0.02545775669666788,\n \"acc_norm\": 0.7287581699346405,\n \"acc_norm_stderr\": 0.02545775669666788\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.7041800643086816,\n \"acc_stderr\": 0.025922371788818767,\n \"acc_norm\": 0.7041800643086816,\n \"acc_norm_stderr\": 0.025922371788818767\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.7469135802469136,\n \"acc_stderr\": 0.024191808600712995,\n \"acc_norm\": 0.7469135802469136,\n \"acc_norm_stderr\": 0.024191808600712995\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.46808510638297873,\n \"acc_stderr\": 0.029766675075873866,\n \"acc_norm\": 0.46808510638297873,\n \"acc_norm_stderr\": 0.029766675075873866\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.4680573663624511,\n \"acc_stderr\": 0.012744149704869647,\n \"acc_norm\": 0.4680573663624511,\n \"acc_norm_stderr\": 0.012744149704869647\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.6727941176470589,\n \"acc_stderr\": 0.028501452860396556,\n \"acc_norm\": 0.6727941176470589,\n \"acc_norm_stderr\": 0.028501452860396556\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.6699346405228758,\n \"acc_stderr\": 0.019023726160724553,\n \"acc_norm\": 0.6699346405228758,\n \"acc_norm_stderr\": 0.019023726160724553\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.6545454545454545,\n \"acc_stderr\": 0.04554619617541054,\n \"acc_norm\": 0.6545454545454545,\n \"acc_norm_stderr\": 0.04554619617541054\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.7224489795918367,\n \"acc_stderr\": 0.02866685779027465,\n \"acc_norm\": 0.7224489795918367,\n \"acc_norm_stderr\": 0.02866685779027465\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.8557213930348259,\n \"acc_stderr\": 0.024845753212306053,\n \"acc_norm\": 0.8557213930348259,\n \"acc_norm_stderr\": 0.024845753212306053\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.86,\n \"acc_stderr\": 0.03487350880197769,\n \"acc_norm\": 0.86,\n \"acc_norm_stderr\": 0.03487350880197769\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.5421686746987951,\n \"acc_stderr\": 0.0387862677100236,\n \"acc_norm\": 0.5421686746987951,\n \"acc_norm_stderr\": 0.0387862677100236\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.8304093567251462,\n \"acc_stderr\": 0.02878210810540171,\n \"acc_norm\": 0.8304093567251462,\n \"acc_norm_stderr\": 0.02878210810540171\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.39167686658506734,\n \"mc1_stderr\": 0.01708779588176963,\n \"mc2\": 0.558695592929387,\n \"mc2_stderr\": 0.015276769304708891\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.8089976322020521,\n \"acc_stderr\": 0.011047808761510427\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.7202426080363912,\n \"acc_stderr\": 0.012364384016735319\n }\n}\n```", "repo_url": "https://huggingface.co/rwitz2/ipo-test", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|arc:challenge|25_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|gsm8k|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hellaswag|10_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T03-53-21.138621.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["**/details_harness|winogrande|5_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-12T03-53-21.138621.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_12T03_53_21.138621", "path": ["results_2023-12-12T03-53-21.138621.parquet"]}, {"split": "latest", "path": ["results_2023-12-12T03-53-21.138621.parquet"]}]}]}
2023-12-12T03:57:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of rwitz2/ipo-test Dataset automatically created during the evaluation run of model rwitz2/ipo-test on the Open LLM Leaderboard. The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-12T03:53:21.138621(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Evaluation run of rwitz2/ipo-test\n\n\n\nDataset automatically created during the evaluation run of model rwitz2/ipo-test on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T03:53:21.138621(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of rwitz2/ipo-test\n\n\n\nDataset automatically created during the evaluation run of model rwitz2/ipo-test on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T03:53:21.138621(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 175, 67, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of rwitz2/ipo-test\n\n\n\nDataset automatically created during the evaluation run of model rwitz2/ipo-test on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-12T03:53:21.138621(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
229ac5e9ee3a0832d9af349e514c73ec842f49c1
# Dataset Card for Evaluation run of Felladrin/TinyMistral-248M-SFT-v4 <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [Felladrin/TinyMistral-248M-SFT-v4](https://huggingface.co/Felladrin/TinyMistral-248M-SFT-v4) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_Felladrin__TinyMistral-248M-SFT-v4", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-12T04:15:32.627780](https://huggingface.co/datasets/open-llm-leaderboard/details_Felladrin__TinyMistral-248M-SFT-v4/blob/main/results_2023-12-12T04-15-32.627780.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.25943849313327083, "acc_stderr": 0.03081669921999169, "acc_norm": 0.26059009573086195, "acc_norm_stderr": 0.03163906495514162, "mc1": 0.20807833537331702, "mc1_stderr": 0.014210503473576618, "mc2": 0.3956118679297354, "mc2_stderr": 0.01494264576082401 }, "harness|arc:challenge|25": { "acc": 0.2022184300341297, "acc_stderr": 0.011737454431872104, "acc_norm": 0.24914675767918087, "acc_norm_stderr": 0.012639407111926435 }, "harness|hellaswag|10": { "acc": 0.2742481577375025, "acc_stderr": 0.004452228541043549, "acc_norm": 0.2815176259709221, "acc_norm_stderr": 0.004488201756642581 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.25, "acc_stderr": 0.04351941398892446, "acc_norm": 0.25, "acc_norm_stderr": 0.04351941398892446 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.2, "acc_stderr": 0.034554737023254366, "acc_norm": 0.2, "acc_norm_stderr": 0.034554737023254366 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.2894736842105263, "acc_stderr": 0.036906779861372814, "acc_norm": 0.2894736842105263, "acc_norm_stderr": 0.036906779861372814 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.24, "acc_stderr": 0.04292346959909284, "acc_norm": 0.24, "acc_norm_stderr": 0.04292346959909284 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.2528301886792453, "acc_stderr": 0.02674989977124124, "acc_norm": 0.2528301886792453, "acc_norm_stderr": 0.02674989977124124 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.22916666666666666, "acc_stderr": 0.035146974678623884, "acc_norm": 0.22916666666666666, "acc_norm_stderr": 0.035146974678623884 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.29, "acc_stderr": 0.045604802157206845, "acc_norm": 0.29, "acc_norm_stderr": 0.045604802157206845 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.24, "acc_stderr": 0.04292346959909282, "acc_norm": 0.24, "acc_norm_stderr": 0.04292346959909282 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.3, "acc_stderr": 0.046056618647183814, "acc_norm": 0.3, "acc_norm_stderr": 0.046056618647183814 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.27167630057803466, "acc_stderr": 0.03391750322321659, "acc_norm": 0.27167630057803466, "acc_norm_stderr": 0.03391750322321659 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.2647058823529412, "acc_stderr": 0.04389869956808778, "acc_norm": 0.2647058823529412, "acc_norm_stderr": 0.04389869956808778 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.21, "acc_stderr": 0.04093601807403326, "acc_norm": 0.21, "acc_norm_stderr": 0.04093601807403326 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.30638297872340425, "acc_stderr": 0.03013590647851756, "acc_norm": 0.30638297872340425, "acc_norm_stderr": 0.03013590647851756 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.24561403508771928, "acc_stderr": 0.04049339297748141, "acc_norm": 0.24561403508771928, "acc_norm_stderr": 0.04049339297748141 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.22758620689655173, "acc_stderr": 0.03493950380131184, "acc_norm": 0.22758620689655173, "acc_norm_stderr": 0.03493950380131184 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.2724867724867725, "acc_stderr": 0.02293097307163335, "acc_norm": 0.2724867724867725, "acc_norm_stderr": 0.02293097307163335 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.2857142857142857, "acc_stderr": 0.04040610178208841, "acc_norm": 0.2857142857142857, "acc_norm_stderr": 0.04040610178208841 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.31, "acc_stderr": 0.04648231987117316, "acc_norm": 0.31, "acc_norm_stderr": 0.04648231987117316 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.2806451612903226, "acc_stderr": 0.025560604721022902, "acc_norm": 0.2806451612903226, "acc_norm_stderr": 0.025560604721022902 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.2955665024630542, "acc_stderr": 0.03210494433751458, "acc_norm": 0.2955665024630542, "acc_norm_stderr": 0.03210494433751458 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.22, "acc_stderr": 0.04163331998932269, "acc_norm": 0.22, "acc_norm_stderr": 0.04163331998932269 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.23030303030303031, "acc_stderr": 0.0328766675860349, "acc_norm": 0.23030303030303031, "acc_norm_stderr": 0.0328766675860349 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.29797979797979796, "acc_stderr": 0.03258630383836557, "acc_norm": 0.29797979797979796, "acc_norm_stderr": 0.03258630383836557 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.39378238341968913, "acc_stderr": 0.03526077095548236, "acc_norm": 0.39378238341968913, "acc_norm_stderr": 0.03526077095548236 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.31794871794871793, "acc_stderr": 0.02361088430892786, "acc_norm": 0.31794871794871793, "acc_norm_stderr": 0.02361088430892786 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.2962962962962963, "acc_stderr": 0.027840811495871937, "acc_norm": 0.2962962962962963, "acc_norm_stderr": 0.027840811495871937 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.33613445378151263, "acc_stderr": 0.030684737115135363, "acc_norm": 0.33613445378151263, "acc_norm_stderr": 0.030684737115135363 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.25165562913907286, "acc_stderr": 0.035433042343899844, "acc_norm": 0.25165562913907286, "acc_norm_stderr": 0.035433042343899844 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.26238532110091745, "acc_stderr": 0.01886188502153473, "acc_norm": 0.26238532110091745, "acc_norm_stderr": 0.01886188502153473 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.39814814814814814, "acc_stderr": 0.03338473403207401, "acc_norm": 0.39814814814814814, "acc_norm_stderr": 0.03338473403207401 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.29901960784313725, "acc_stderr": 0.03213325717373617, "acc_norm": 0.29901960784313725, "acc_norm_stderr": 0.03213325717373617 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.21518987341772153, "acc_stderr": 0.02675082699467618, "acc_norm": 0.21518987341772153, "acc_norm_stderr": 0.02675082699467618 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.17040358744394618, "acc_stderr": 0.025234593447136165, "acc_norm": 0.17040358744394618, "acc_norm_stderr": 0.025234593447136165 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.19083969465648856, "acc_stderr": 0.03446513350752599, "acc_norm": 0.19083969465648856, "acc_norm_stderr": 0.03446513350752599 }, "harness|hendrycksTest-international_law|5": { "acc": 0.09917355371900827, "acc_stderr": 0.02728524631275895, "acc_norm": 0.09917355371900827, "acc_norm_stderr": 0.02728524631275895 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.21296296296296297, "acc_stderr": 0.0395783547198098, "acc_norm": 0.21296296296296297, "acc_norm_stderr": 0.0395783547198098 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.26380368098159507, "acc_stderr": 0.03462419931615623, "acc_norm": 0.26380368098159507, "acc_norm_stderr": 0.03462419931615623 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.29464285714285715, "acc_stderr": 0.043270409325787296, "acc_norm": 0.29464285714285715, "acc_norm_stderr": 0.043270409325787296 }, "harness|hendrycksTest-management|5": { "acc": 0.2912621359223301, "acc_stderr": 0.044986763205729224, "acc_norm": 0.2912621359223301, "acc_norm_stderr": 0.044986763205729224 }, "harness|hendrycksTest-marketing|5": { "acc": 0.23076923076923078, "acc_stderr": 0.02760192138141759, "acc_norm": 0.23076923076923078, "acc_norm_stderr": 0.02760192138141759 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.26, "acc_stderr": 0.04408440022768078, "acc_norm": 0.26, "acc_norm_stderr": 0.04408440022768078 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.27330779054916987, "acc_stderr": 0.015936681062628556, "acc_norm": 0.27330779054916987, "acc_norm_stderr": 0.015936681062628556 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.21965317919075145, "acc_stderr": 0.022289638852617904, "acc_norm": 0.21965317919075145, "acc_norm_stderr": 0.022289638852617904 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.2558659217877095, "acc_stderr": 0.014593620923210756, "acc_norm": 0.2558659217877095, "acc_norm_stderr": 0.014593620923210756 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.26143790849673204, "acc_stderr": 0.025160998214292456, "acc_norm": 0.26143790849673204, "acc_norm_stderr": 0.025160998214292456 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.24115755627009647, "acc_stderr": 0.02429659403476343, "acc_norm": 0.24115755627009647, "acc_norm_stderr": 0.02429659403476343 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.24382716049382716, "acc_stderr": 0.0238918795419596, "acc_norm": 0.24382716049382716, "acc_norm_stderr": 0.0238918795419596 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.23404255319148937, "acc_stderr": 0.025257861359432403, "acc_norm": 0.23404255319148937, "acc_norm_stderr": 0.025257861359432403 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.24837027379400262, "acc_stderr": 0.011035212598034503, "acc_norm": 0.24837027379400262, "acc_norm_stderr": 0.011035212598034503 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.4007352941176471, "acc_stderr": 0.029768263528933105, "acc_norm": 0.4007352941176471, "acc_norm_stderr": 0.029768263528933105 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.25980392156862747, "acc_stderr": 0.017740899509177798, "acc_norm": 0.25980392156862747, "acc_norm_stderr": 0.017740899509177798 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.22727272727272727, "acc_stderr": 0.04013964554072775, "acc_norm": 0.22727272727272727, "acc_norm_stderr": 0.04013964554072775 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.33877551020408164, "acc_stderr": 0.030299506562154185, "acc_norm": 0.33877551020408164, "acc_norm_stderr": 0.030299506562154185 }, "harness|hendrycksTest-sociology|5": { "acc": 0.27860696517412936, "acc_stderr": 0.03170056183497308, "acc_norm": 0.27860696517412936, "acc_norm_stderr": 0.03170056183497308 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.17, "acc_stderr": 0.03775251680686371, "acc_norm": 0.17, "acc_norm_stderr": 0.03775251680686371 }, "harness|hendrycksTest-virology|5": { "acc": 0.1686746987951807, "acc_stderr": 0.029152009627856544, "acc_norm": 0.1686746987951807, "acc_norm_stderr": 0.029152009627856544 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.22807017543859648, "acc_stderr": 0.03218093795602357, "acc_norm": 0.22807017543859648, "acc_norm_stderr": 0.03218093795602357 }, "harness|truthfulqa:mc|0": { "mc1": 0.20807833537331702, "mc1_stderr": 0.014210503473576618, "mc2": 0.3956118679297354, "mc2_stderr": 0.01494264576082401 }, "harness|winogrande|5": { "acc": 0.505130228887135, "acc_stderr": 0.014051745961790516 }, "harness|gsm8k|5": { "acc": 0.0, "acc_stderr": 0.0 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
open-llm-leaderboard/details_Felladrin__TinyMistral-248M-SFT-v4
[ "region:us" ]
2023-12-12T04:18:24+00:00
{"pretty_name": "Evaluation run of Felladrin/TinyMistral-248M-SFT-v4", "dataset_summary": "Dataset automatically created during the evaluation run of model [Felladrin/TinyMistral-248M-SFT-v4](https://huggingface.co/Felladrin/TinyMistral-248M-SFT-v4) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_Felladrin__TinyMistral-248M-SFT-v4\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-12T04:15:32.627780](https://huggingface.co/datasets/open-llm-leaderboard/details_Felladrin__TinyMistral-248M-SFT-v4/blob/main/results_2023-12-12T04-15-32.627780.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.25943849313327083,\n \"acc_stderr\": 0.03081669921999169,\n \"acc_norm\": 0.26059009573086195,\n \"acc_norm_stderr\": 0.03163906495514162,\n \"mc1\": 0.20807833537331702,\n \"mc1_stderr\": 0.014210503473576618,\n \"mc2\": 0.3956118679297354,\n \"mc2_stderr\": 0.01494264576082401\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.2022184300341297,\n \"acc_stderr\": 0.011737454431872104,\n \"acc_norm\": 0.24914675767918087,\n \"acc_norm_stderr\": 0.012639407111926435\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.2742481577375025,\n \"acc_stderr\": 0.004452228541043549,\n \"acc_norm\": 0.2815176259709221,\n \"acc_norm_stderr\": 0.004488201756642581\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.25,\n \"acc_stderr\": 0.04351941398892446,\n \"acc_norm\": 0.25,\n \"acc_norm_stderr\": 0.04351941398892446\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.2,\n \"acc_stderr\": 0.034554737023254366,\n \"acc_norm\": 0.2,\n \"acc_norm_stderr\": 0.034554737023254366\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.2894736842105263,\n \"acc_stderr\": 0.036906779861372814,\n \"acc_norm\": 0.2894736842105263,\n \"acc_norm_stderr\": 0.036906779861372814\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.24,\n \"acc_stderr\": 0.04292346959909284,\n \"acc_norm\": 0.24,\n \"acc_norm_stderr\": 0.04292346959909284\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.2528301886792453,\n \"acc_stderr\": 0.02674989977124124,\n \"acc_norm\": 0.2528301886792453,\n \"acc_norm_stderr\": 0.02674989977124124\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.22916666666666666,\n \"acc_stderr\": 0.035146974678623884,\n \"acc_norm\": 0.22916666666666666,\n \"acc_norm_stderr\": 0.035146974678623884\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.29,\n \"acc_stderr\": 0.045604802157206845,\n \"acc_norm\": 0.29,\n \"acc_norm_stderr\": 0.045604802157206845\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.24,\n \"acc_stderr\": 0.04292346959909282,\n \"acc_norm\": 0.24,\n \"acc_norm_stderr\": 0.04292346959909282\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.3,\n \"acc_stderr\": 0.046056618647183814,\n \"acc_norm\": 0.3,\n \"acc_norm_stderr\": 0.046056618647183814\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.27167630057803466,\n \"acc_stderr\": 0.03391750322321659,\n \"acc_norm\": 0.27167630057803466,\n \"acc_norm_stderr\": 0.03391750322321659\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.2647058823529412,\n \"acc_stderr\": 0.04389869956808778,\n \"acc_norm\": 0.2647058823529412,\n \"acc_norm_stderr\": 0.04389869956808778\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.21,\n \"acc_stderr\": 0.04093601807403326,\n \"acc_norm\": 0.21,\n \"acc_norm_stderr\": 0.04093601807403326\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.30638297872340425,\n \"acc_stderr\": 0.03013590647851756,\n \"acc_norm\": 0.30638297872340425,\n \"acc_norm_stderr\": 0.03013590647851756\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.24561403508771928,\n \"acc_stderr\": 0.04049339297748141,\n \"acc_norm\": 0.24561403508771928,\n \"acc_norm_stderr\": 0.04049339297748141\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.22758620689655173,\n \"acc_stderr\": 0.03493950380131184,\n \"acc_norm\": 0.22758620689655173,\n \"acc_norm_stderr\": 0.03493950380131184\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.2724867724867725,\n \"acc_stderr\": 0.02293097307163335,\n \"acc_norm\": 0.2724867724867725,\n \"acc_norm_stderr\": 0.02293097307163335\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.2857142857142857,\n \"acc_stderr\": 0.04040610178208841,\n \"acc_norm\": 0.2857142857142857,\n \"acc_norm_stderr\": 0.04040610178208841\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.31,\n \"acc_stderr\": 0.04648231987117316,\n \"acc_norm\": 0.31,\n \"acc_norm_stderr\": 0.04648231987117316\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.2806451612903226,\n \"acc_stderr\": 0.025560604721022902,\n \"acc_norm\": 0.2806451612903226,\n \"acc_norm_stderr\": 0.025560604721022902\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.2955665024630542,\n \"acc_stderr\": 0.03210494433751458,\n \"acc_norm\": 0.2955665024630542,\n \"acc_norm_stderr\": 0.03210494433751458\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.22,\n \"acc_stderr\": 0.04163331998932269,\n \"acc_norm\": 0.22,\n \"acc_norm_stderr\": 0.04163331998932269\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.23030303030303031,\n \"acc_stderr\": 0.0328766675860349,\n \"acc_norm\": 0.23030303030303031,\n \"acc_norm_stderr\": 0.0328766675860349\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.29797979797979796,\n \"acc_stderr\": 0.03258630383836557,\n \"acc_norm\": 0.29797979797979796,\n \"acc_norm_stderr\": 0.03258630383836557\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.39378238341968913,\n \"acc_stderr\": 0.03526077095548236,\n \"acc_norm\": 0.39378238341968913,\n \"acc_norm_stderr\": 0.03526077095548236\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.31794871794871793,\n \"acc_stderr\": 0.02361088430892786,\n \"acc_norm\": 0.31794871794871793,\n \"acc_norm_stderr\": 0.02361088430892786\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.2962962962962963,\n \"acc_stderr\": 0.027840811495871937,\n \"acc_norm\": 0.2962962962962963,\n \"acc_norm_stderr\": 0.027840811495871937\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.33613445378151263,\n \"acc_stderr\": 0.030684737115135363,\n \"acc_norm\": 0.33613445378151263,\n \"acc_norm_stderr\": 0.030684737115135363\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.25165562913907286,\n \"acc_stderr\": 0.035433042343899844,\n \"acc_norm\": 0.25165562913907286,\n \"acc_norm_stderr\": 0.035433042343899844\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.26238532110091745,\n \"acc_stderr\": 0.01886188502153473,\n \"acc_norm\": 0.26238532110091745,\n \"acc_norm_stderr\": 0.01886188502153473\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.39814814814814814,\n \"acc_stderr\": 0.03338473403207401,\n \"acc_norm\": 0.39814814814814814,\n \"acc_norm_stderr\": 0.03338473403207401\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.29901960784313725,\n \"acc_stderr\": 0.03213325717373617,\n \"acc_norm\": 0.29901960784313725,\n \"acc_norm_stderr\": 0.03213325717373617\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.21518987341772153,\n \"acc_stderr\": 0.02675082699467618,\n \"acc_norm\": 0.21518987341772153,\n \"acc_norm_stderr\": 0.02675082699467618\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.17040358744394618,\n \"acc_stderr\": 0.025234593447136165,\n \"acc_norm\": 0.17040358744394618,\n \"acc_norm_stderr\": 0.025234593447136165\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.19083969465648856,\n \"acc_stderr\": 0.03446513350752599,\n \"acc_norm\": 0.19083969465648856,\n \"acc_norm_stderr\": 0.03446513350752599\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.09917355371900827,\n \"acc_stderr\": 0.02728524631275895,\n \"acc_norm\": 0.09917355371900827,\n \"acc_norm_stderr\": 0.02728524631275895\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.21296296296296297,\n \"acc_stderr\": 0.0395783547198098,\n \"acc_norm\": 0.21296296296296297,\n \"acc_norm_stderr\": 0.0395783547198098\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.26380368098159507,\n \"acc_stderr\": 0.03462419931615623,\n \"acc_norm\": 0.26380368098159507,\n \"acc_norm_stderr\": 0.03462419931615623\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.29464285714285715,\n \"acc_stderr\": 0.043270409325787296,\n \"acc_norm\": 0.29464285714285715,\n \"acc_norm_stderr\": 0.043270409325787296\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.2912621359223301,\n \"acc_stderr\": 0.044986763205729224,\n \"acc_norm\": 0.2912621359223301,\n \"acc_norm_stderr\": 0.044986763205729224\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.23076923076923078,\n \"acc_stderr\": 0.02760192138141759,\n \"acc_norm\": 0.23076923076923078,\n \"acc_norm_stderr\": 0.02760192138141759\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.26,\n \"acc_stderr\": 0.04408440022768078,\n \"acc_norm\": 0.26,\n \"acc_norm_stderr\": 0.04408440022768078\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.27330779054916987,\n \"acc_stderr\": 0.015936681062628556,\n \"acc_norm\": 0.27330779054916987,\n \"acc_norm_stderr\": 0.015936681062628556\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.21965317919075145,\n \"acc_stderr\": 0.022289638852617904,\n \"acc_norm\": 0.21965317919075145,\n \"acc_norm_stderr\": 0.022289638852617904\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.2558659217877095,\n \"acc_stderr\": 0.014593620923210756,\n \"acc_norm\": 0.2558659217877095,\n \"acc_norm_stderr\": 0.014593620923210756\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.26143790849673204,\n \"acc_stderr\": 0.025160998214292456,\n \"acc_norm\": 0.26143790849673204,\n \"acc_norm_stderr\": 0.025160998214292456\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.24115755627009647,\n \"acc_stderr\": 0.02429659403476343,\n \"acc_norm\": 0.24115755627009647,\n \"acc_norm_stderr\": 0.02429659403476343\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.24382716049382716,\n \"acc_stderr\": 0.0238918795419596,\n \"acc_norm\": 0.24382716049382716,\n \"acc_norm_stderr\": 0.0238918795419596\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.23404255319148937,\n \"acc_stderr\": 0.025257861359432403,\n \"acc_norm\": 0.23404255319148937,\n \"acc_norm_stderr\": 0.025257861359432403\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.24837027379400262,\n \"acc_stderr\": 0.011035212598034503,\n \"acc_norm\": 0.24837027379400262,\n \"acc_norm_stderr\": 0.011035212598034503\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.4007352941176471,\n \"acc_stderr\": 0.029768263528933105,\n \"acc_norm\": 0.4007352941176471,\n \"acc_norm_stderr\": 0.029768263528933105\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.25980392156862747,\n \"acc_stderr\": 0.017740899509177798,\n \"acc_norm\": 0.25980392156862747,\n \"acc_norm_stderr\": 0.017740899509177798\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.22727272727272727,\n \"acc_stderr\": 0.04013964554072775,\n \"acc_norm\": 0.22727272727272727,\n \"acc_norm_stderr\": 0.04013964554072775\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.33877551020408164,\n \"acc_stderr\": 0.030299506562154185,\n \"acc_norm\": 0.33877551020408164,\n \"acc_norm_stderr\": 0.030299506562154185\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.27860696517412936,\n \"acc_stderr\": 0.03170056183497308,\n \"acc_norm\": 0.27860696517412936,\n \"acc_norm_stderr\": 0.03170056183497308\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.17,\n \"acc_stderr\": 0.03775251680686371,\n \"acc_norm\": 0.17,\n \"acc_norm_stderr\": 0.03775251680686371\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.1686746987951807,\n \"acc_stderr\": 0.029152009627856544,\n \"acc_norm\": 0.1686746987951807,\n \"acc_norm_stderr\": 0.029152009627856544\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.22807017543859648,\n \"acc_stderr\": 0.03218093795602357,\n \"acc_norm\": 0.22807017543859648,\n \"acc_norm_stderr\": 0.03218093795602357\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.20807833537331702,\n \"mc1_stderr\": 0.014210503473576618,\n \"mc2\": 0.3956118679297354,\n \"mc2_stderr\": 0.01494264576082401\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.505130228887135,\n \"acc_stderr\": 0.014051745961790516\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.0,\n \"acc_stderr\": 0.0\n }\n}\n```", "repo_url": "https://huggingface.co/Felladrin/TinyMistral-248M-SFT-v4", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|arc:challenge|25_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|gsm8k|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hellaswag|10_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T04-15-32.627780.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["**/details_harness|winogrande|5_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-12T04-15-32.627780.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_12T04_15_32.627780", "path": ["results_2023-12-12T04-15-32.627780.parquet"]}, {"split": "latest", "path": ["results_2023-12-12T04-15-32.627780.parquet"]}]}]}
2023-12-12T04:19:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of Felladrin/TinyMistral-248M-SFT-v4 Dataset automatically created during the evaluation run of model Felladrin/TinyMistral-248M-SFT-v4 on the Open LLM Leaderboard. The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-12T04:15:32.627780(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Evaluation run of Felladrin/TinyMistral-248M-SFT-v4\n\n\n\nDataset automatically created during the evaluation run of model Felladrin/TinyMistral-248M-SFT-v4 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T04:15:32.627780(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of Felladrin/TinyMistral-248M-SFT-v4\n\n\n\nDataset automatically created during the evaluation run of model Felladrin/TinyMistral-248M-SFT-v4 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T04:15:32.627780(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 195, 67, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of Felladrin/TinyMistral-248M-SFT-v4\n\n\n\nDataset automatically created during the evaluation run of model Felladrin/TinyMistral-248M-SFT-v4 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-12T04:15:32.627780(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]" ]
db9b9b2b82ce425d756ec9619aea06274f88b634
# Dataset Card for Evaluation run of Sao10K/NyakuraV2.1-m7 <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [Sao10K/NyakuraV2.1-m7](https://huggingface.co/Sao10K/NyakuraV2.1-m7) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_Sao10K__NyakuraV2.1-m7", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-12T04:30:54.576577](https://huggingface.co/datasets/open-llm-leaderboard/details_Sao10K__NyakuraV2.1-m7/blob/main/results_2023-12-12T04-30-54.576577.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.5812856791159661, "acc_stderr": 0.03351473539841468, "acc_norm": 0.5885734680789351, "acc_norm_stderr": 0.03422448074980651, "mc1": 0.29498164014687883, "mc1_stderr": 0.015964400965589664, "mc2": 0.45008851442315223, "mc2_stderr": 0.015144388624059283 }, "harness|arc:challenge|25": { "acc": 0.5511945392491467, "acc_stderr": 0.014534599585097662, "acc_norm": 0.5861774744027304, "acc_norm_stderr": 0.014392730009221007 }, "harness|hellaswag|10": { "acc": 0.6320454092810197, "acc_stderr": 0.004812633280078261, "acc_norm": 0.8188607847042422, "acc_norm_stderr": 0.003843463792037909 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.28, "acc_stderr": 0.04512608598542128, "acc_norm": 0.28, "acc_norm_stderr": 0.04512608598542128 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.5777777777777777, "acc_stderr": 0.04266763404099582, "acc_norm": 0.5777777777777777, "acc_norm_stderr": 0.04266763404099582 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.6118421052631579, "acc_stderr": 0.03965842097512744, "acc_norm": 0.6118421052631579, "acc_norm_stderr": 0.03965842097512744 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.58, "acc_stderr": 0.049604496374885836, "acc_norm": 0.58, "acc_norm_stderr": 0.049604496374885836 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.630188679245283, "acc_stderr": 0.02971142188010793, "acc_norm": 0.630188679245283, "acc_norm_stderr": 0.02971142188010793 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.6041666666666666, "acc_stderr": 0.04089465449325582, "acc_norm": 0.6041666666666666, "acc_norm_stderr": 0.04089465449325582 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.41, "acc_stderr": 0.04943110704237102, "acc_norm": 0.41, "acc_norm_stderr": 0.04943110704237102 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.54, "acc_stderr": 0.05009082659620333, "acc_norm": 0.54, "acc_norm_stderr": 0.05009082659620333 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.33, "acc_stderr": 0.047258156262526045, "acc_norm": 0.33, "acc_norm_stderr": 0.047258156262526045 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.5780346820809249, "acc_stderr": 0.0376574669386515, "acc_norm": 0.5780346820809249, "acc_norm_stderr": 0.0376574669386515 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.3333333333333333, "acc_stderr": 0.04690650298201943, "acc_norm": 0.3333333333333333, "acc_norm_stderr": 0.04690650298201943 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.71, "acc_stderr": 0.045604802157206845, "acc_norm": 0.71, "acc_norm_stderr": 0.045604802157206845 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.4595744680851064, "acc_stderr": 0.03257901482099834, "acc_norm": 0.4595744680851064, "acc_norm_stderr": 0.03257901482099834 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.4473684210526316, "acc_stderr": 0.04677473004491199, "acc_norm": 0.4473684210526316, "acc_norm_stderr": 0.04677473004491199 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.5241379310344828, "acc_stderr": 0.0416180850350153, "acc_norm": 0.5241379310344828, "acc_norm_stderr": 0.0416180850350153 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.373015873015873, "acc_stderr": 0.02490699045899257, "acc_norm": 0.373015873015873, "acc_norm_stderr": 0.02490699045899257 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.3888888888888889, "acc_stderr": 0.04360314860077459, "acc_norm": 0.3888888888888889, "acc_norm_stderr": 0.04360314860077459 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.33, "acc_stderr": 0.047258156262526045, "acc_norm": 0.33, "acc_norm_stderr": 0.047258156262526045 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.6838709677419355, "acc_stderr": 0.026450874489042774, "acc_norm": 0.6838709677419355, "acc_norm_stderr": 0.026450874489042774 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.4827586206896552, "acc_stderr": 0.035158955511656986, "acc_norm": 0.4827586206896552, "acc_norm_stderr": 0.035158955511656986 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.68, "acc_stderr": 0.04688261722621504, "acc_norm": 0.68, "acc_norm_stderr": 0.04688261722621504 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.7272727272727273, "acc_stderr": 0.0347769116216366, "acc_norm": 0.7272727272727273, "acc_norm_stderr": 0.0347769116216366 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.702020202020202, "acc_stderr": 0.03258630383836556, "acc_norm": 0.702020202020202, "acc_norm_stderr": 0.03258630383836556 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.8290155440414507, "acc_stderr": 0.027171213683164542, "acc_norm": 0.8290155440414507, "acc_norm_stderr": 0.027171213683164542 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.5461538461538461, "acc_stderr": 0.025242770987126184, "acc_norm": 0.5461538461538461, "acc_norm_stderr": 0.025242770987126184 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.3296296296296296, "acc_stderr": 0.02866120111652459, "acc_norm": 0.3296296296296296, "acc_norm_stderr": 0.02866120111652459 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.5882352941176471, "acc_stderr": 0.031968769891957786, "acc_norm": 0.5882352941176471, "acc_norm_stderr": 0.031968769891957786 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.39072847682119205, "acc_stderr": 0.03983798306659807, "acc_norm": 0.39072847682119205, "acc_norm_stderr": 0.03983798306659807 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.726605504587156, "acc_stderr": 0.019109299846098292, "acc_norm": 0.726605504587156, "acc_norm_stderr": 0.019109299846098292 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.4537037037037037, "acc_stderr": 0.03395322726375797, "acc_norm": 0.4537037037037037, "acc_norm_stderr": 0.03395322726375797 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.7205882352941176, "acc_stderr": 0.03149328104507957, "acc_norm": 0.7205882352941176, "acc_norm_stderr": 0.03149328104507957 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.7172995780590717, "acc_stderr": 0.029312814153955924, "acc_norm": 0.7172995780590717, "acc_norm_stderr": 0.029312814153955924 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.6547085201793722, "acc_stderr": 0.03191100192835794, "acc_norm": 0.6547085201793722, "acc_norm_stderr": 0.03191100192835794 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.6946564885496184, "acc_stderr": 0.04039314978724561, "acc_norm": 0.6946564885496184, "acc_norm_stderr": 0.04039314978724561 }, "harness|hendrycksTest-international_law|5": { "acc": 0.743801652892562, "acc_stderr": 0.03984979653302871, "acc_norm": 0.743801652892562, "acc_norm_stderr": 0.03984979653302871 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.6666666666666666, "acc_stderr": 0.04557239513497751, "acc_norm": 0.6666666666666666, "acc_norm_stderr": 0.04557239513497751 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.7361963190184049, "acc_stderr": 0.03462419931615623, "acc_norm": 0.7361963190184049, "acc_norm_stderr": 0.03462419931615623 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.5178571428571429, "acc_stderr": 0.04742762361243011, "acc_norm": 0.5178571428571429, "acc_norm_stderr": 0.04742762361243011 }, "harness|hendrycksTest-management|5": { "acc": 0.7766990291262136, "acc_stderr": 0.04123553189891431, "acc_norm": 0.7766990291262136, "acc_norm_stderr": 0.04123553189891431 }, "harness|hendrycksTest-marketing|5": { "acc": 0.7905982905982906, "acc_stderr": 0.026655699653922737, "acc_norm": 0.7905982905982906, "acc_norm_stderr": 0.026655699653922737 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.67, "acc_stderr": 0.04725815626252607, "acc_norm": 0.67, "acc_norm_stderr": 0.04725815626252607 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.7624521072796935, "acc_stderr": 0.015218733046150193, "acc_norm": 0.7624521072796935, "acc_norm_stderr": 0.015218733046150193 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.6473988439306358, "acc_stderr": 0.02572280220089581, "acc_norm": 0.6473988439306358, "acc_norm_stderr": 0.02572280220089581 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.25139664804469275, "acc_stderr": 0.01450897945355397, "acc_norm": 0.25139664804469275, "acc_norm_stderr": 0.01450897945355397 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.6470588235294118, "acc_stderr": 0.027363593284684965, "acc_norm": 0.6470588235294118, "acc_norm_stderr": 0.027363593284684965 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.6591639871382636, "acc_stderr": 0.026920841260776165, "acc_norm": 0.6591639871382636, "acc_norm_stderr": 0.026920841260776165 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.6512345679012346, "acc_stderr": 0.02651759772446501, "acc_norm": 0.6512345679012346, "acc_norm_stderr": 0.02651759772446501 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.42907801418439717, "acc_stderr": 0.02952591430255856, "acc_norm": 0.42907801418439717, "acc_norm_stderr": 0.02952591430255856 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.40547588005215124, "acc_stderr": 0.012539960672377202, "acc_norm": 0.40547588005215124, "acc_norm_stderr": 0.012539960672377202 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.5661764705882353, "acc_stderr": 0.030105636570016633, "acc_norm": 0.5661764705882353, "acc_norm_stderr": 0.030105636570016633 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.5931372549019608, "acc_stderr": 0.019873802005061177, "acc_norm": 0.5931372549019608, "acc_norm_stderr": 0.019873802005061177 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.6272727272727273, "acc_stderr": 0.04631381319425465, "acc_norm": 0.6272727272727273, "acc_norm_stderr": 0.04631381319425465 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.673469387755102, "acc_stderr": 0.030021056238440307, "acc_norm": 0.673469387755102, "acc_norm_stderr": 0.030021056238440307 }, "harness|hendrycksTest-sociology|5": { "acc": 0.835820895522388, "acc_stderr": 0.02619392354445414, "acc_norm": 0.835820895522388, "acc_norm_stderr": 0.02619392354445414 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.81, "acc_stderr": 0.03942772444036624, "acc_norm": 0.81, "acc_norm_stderr": 0.03942772444036624 }, "harness|hendrycksTest-virology|5": { "acc": 0.46987951807228917, "acc_stderr": 0.03885425420866766, "acc_norm": 0.46987951807228917, "acc_norm_stderr": 0.03885425420866766 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.7543859649122807, "acc_stderr": 0.0330140594698725, "acc_norm": 0.7543859649122807, "acc_norm_stderr": 0.0330140594698725 }, "harness|truthfulqa:mc|0": { "mc1": 0.29498164014687883, "mc1_stderr": 0.015964400965589664, "mc2": 0.45008851442315223, "mc2_stderr": 0.015144388624059283 }, "harness|winogrande|5": { "acc": 0.7277032359905288, "acc_stderr": 0.012510697991453934 }, "harness|gsm8k|5": { "acc": 0.2266868840030326, "acc_stderr": 0.011532758009339995 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
open-llm-leaderboard/details_Sao10K__NyakuraV2.1-m7
[ "region:us" ]
2023-12-12T04:33:49+00:00
{"pretty_name": "Evaluation run of Sao10K/NyakuraV2.1-m7", "dataset_summary": "Dataset automatically created during the evaluation run of model [Sao10K/NyakuraV2.1-m7](https://huggingface.co/Sao10K/NyakuraV2.1-m7) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_Sao10K__NyakuraV2.1-m7\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-12T04:30:54.576577](https://huggingface.co/datasets/open-llm-leaderboard/details_Sao10K__NyakuraV2.1-m7/blob/main/results_2023-12-12T04-30-54.576577.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.5812856791159661,\n \"acc_stderr\": 0.03351473539841468,\n \"acc_norm\": 0.5885734680789351,\n \"acc_norm_stderr\": 0.03422448074980651,\n \"mc1\": 0.29498164014687883,\n \"mc1_stderr\": 0.015964400965589664,\n \"mc2\": 0.45008851442315223,\n \"mc2_stderr\": 0.015144388624059283\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.5511945392491467,\n \"acc_stderr\": 0.014534599585097662,\n \"acc_norm\": 0.5861774744027304,\n \"acc_norm_stderr\": 0.014392730009221007\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6320454092810197,\n \"acc_stderr\": 0.004812633280078261,\n \"acc_norm\": 0.8188607847042422,\n \"acc_norm_stderr\": 0.003843463792037909\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.28,\n \"acc_stderr\": 0.04512608598542128,\n \"acc_norm\": 0.28,\n \"acc_norm_stderr\": 0.04512608598542128\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.5777777777777777,\n \"acc_stderr\": 0.04266763404099582,\n \"acc_norm\": 0.5777777777777777,\n \"acc_norm_stderr\": 0.04266763404099582\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.6118421052631579,\n \"acc_stderr\": 0.03965842097512744,\n \"acc_norm\": 0.6118421052631579,\n \"acc_norm_stderr\": 0.03965842097512744\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.58,\n \"acc_stderr\": 0.049604496374885836,\n \"acc_norm\": 0.58,\n \"acc_norm_stderr\": 0.049604496374885836\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.630188679245283,\n \"acc_stderr\": 0.02971142188010793,\n \"acc_norm\": 0.630188679245283,\n \"acc_norm_stderr\": 0.02971142188010793\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.6041666666666666,\n \"acc_stderr\": 0.04089465449325582,\n \"acc_norm\": 0.6041666666666666,\n \"acc_norm_stderr\": 0.04089465449325582\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.41,\n \"acc_stderr\": 0.04943110704237102,\n \"acc_norm\": 0.41,\n \"acc_norm_stderr\": 0.04943110704237102\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.54,\n \"acc_stderr\": 0.05009082659620333,\n \"acc_norm\": 0.54,\n \"acc_norm_stderr\": 0.05009082659620333\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.33,\n \"acc_stderr\": 0.047258156262526045,\n \"acc_norm\": 0.33,\n \"acc_norm_stderr\": 0.047258156262526045\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.5780346820809249,\n \"acc_stderr\": 0.0376574669386515,\n \"acc_norm\": 0.5780346820809249,\n \"acc_norm_stderr\": 0.0376574669386515\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.3333333333333333,\n \"acc_stderr\": 0.04690650298201943,\n \"acc_norm\": 0.3333333333333333,\n \"acc_norm_stderr\": 0.04690650298201943\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.71,\n \"acc_stderr\": 0.045604802157206845,\n \"acc_norm\": 0.71,\n \"acc_norm_stderr\": 0.045604802157206845\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.4595744680851064,\n \"acc_stderr\": 0.03257901482099834,\n \"acc_norm\": 0.4595744680851064,\n \"acc_norm_stderr\": 0.03257901482099834\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.4473684210526316,\n \"acc_stderr\": 0.04677473004491199,\n \"acc_norm\": 0.4473684210526316,\n \"acc_norm_stderr\": 0.04677473004491199\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.5241379310344828,\n \"acc_stderr\": 0.0416180850350153,\n \"acc_norm\": 0.5241379310344828,\n \"acc_norm_stderr\": 0.0416180850350153\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.373015873015873,\n \"acc_stderr\": 0.02490699045899257,\n \"acc_norm\": 0.373015873015873,\n \"acc_norm_stderr\": 0.02490699045899257\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.3888888888888889,\n \"acc_stderr\": 0.04360314860077459,\n \"acc_norm\": 0.3888888888888889,\n \"acc_norm_stderr\": 0.04360314860077459\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.33,\n \"acc_stderr\": 0.047258156262526045,\n \"acc_norm\": 0.33,\n \"acc_norm_stderr\": 0.047258156262526045\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.6838709677419355,\n \"acc_stderr\": 0.026450874489042774,\n \"acc_norm\": 0.6838709677419355,\n \"acc_norm_stderr\": 0.026450874489042774\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.4827586206896552,\n \"acc_stderr\": 0.035158955511656986,\n \"acc_norm\": 0.4827586206896552,\n \"acc_norm_stderr\": 0.035158955511656986\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.68,\n \"acc_stderr\": 0.04688261722621504,\n \"acc_norm\": 0.68,\n \"acc_norm_stderr\": 0.04688261722621504\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.7272727272727273,\n \"acc_stderr\": 0.0347769116216366,\n \"acc_norm\": 0.7272727272727273,\n \"acc_norm_stderr\": 0.0347769116216366\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.702020202020202,\n \"acc_stderr\": 0.03258630383836556,\n \"acc_norm\": 0.702020202020202,\n \"acc_norm_stderr\": 0.03258630383836556\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.8290155440414507,\n \"acc_stderr\": 0.027171213683164542,\n \"acc_norm\": 0.8290155440414507,\n \"acc_norm_stderr\": 0.027171213683164542\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.5461538461538461,\n \"acc_stderr\": 0.025242770987126184,\n \"acc_norm\": 0.5461538461538461,\n \"acc_norm_stderr\": 0.025242770987126184\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.3296296296296296,\n \"acc_stderr\": 0.02866120111652459,\n \"acc_norm\": 0.3296296296296296,\n \"acc_norm_stderr\": 0.02866120111652459\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.5882352941176471,\n \"acc_stderr\": 0.031968769891957786,\n \"acc_norm\": 0.5882352941176471,\n \"acc_norm_stderr\": 0.031968769891957786\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.39072847682119205,\n \"acc_stderr\": 0.03983798306659807,\n \"acc_norm\": 0.39072847682119205,\n \"acc_norm_stderr\": 0.03983798306659807\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.726605504587156,\n \"acc_stderr\": 0.019109299846098292,\n \"acc_norm\": 0.726605504587156,\n \"acc_norm_stderr\": 0.019109299846098292\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.4537037037037037,\n \"acc_stderr\": 0.03395322726375797,\n \"acc_norm\": 0.4537037037037037,\n \"acc_norm_stderr\": 0.03395322726375797\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.7205882352941176,\n \"acc_stderr\": 0.03149328104507957,\n \"acc_norm\": 0.7205882352941176,\n \"acc_norm_stderr\": 0.03149328104507957\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.7172995780590717,\n \"acc_stderr\": 0.029312814153955924,\n \"acc_norm\": 0.7172995780590717,\n \"acc_norm_stderr\": 0.029312814153955924\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.6547085201793722,\n \"acc_stderr\": 0.03191100192835794,\n \"acc_norm\": 0.6547085201793722,\n \"acc_norm_stderr\": 0.03191100192835794\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.6946564885496184,\n \"acc_stderr\": 0.04039314978724561,\n \"acc_norm\": 0.6946564885496184,\n \"acc_norm_stderr\": 0.04039314978724561\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.743801652892562,\n \"acc_stderr\": 0.03984979653302871,\n \"acc_norm\": 0.743801652892562,\n \"acc_norm_stderr\": 0.03984979653302871\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.6666666666666666,\n \"acc_stderr\": 0.04557239513497751,\n \"acc_norm\": 0.6666666666666666,\n \"acc_norm_stderr\": 0.04557239513497751\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.7361963190184049,\n \"acc_stderr\": 0.03462419931615623,\n \"acc_norm\": 0.7361963190184049,\n \"acc_norm_stderr\": 0.03462419931615623\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.5178571428571429,\n \"acc_stderr\": 0.04742762361243011,\n \"acc_norm\": 0.5178571428571429,\n \"acc_norm_stderr\": 0.04742762361243011\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.7766990291262136,\n \"acc_stderr\": 0.04123553189891431,\n \"acc_norm\": 0.7766990291262136,\n \"acc_norm_stderr\": 0.04123553189891431\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.7905982905982906,\n \"acc_stderr\": 0.026655699653922737,\n \"acc_norm\": 0.7905982905982906,\n \"acc_norm_stderr\": 0.026655699653922737\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.67,\n \"acc_stderr\": 0.04725815626252607,\n \"acc_norm\": 0.67,\n \"acc_norm_stderr\": 0.04725815626252607\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.7624521072796935,\n \"acc_stderr\": 0.015218733046150193,\n \"acc_norm\": 0.7624521072796935,\n \"acc_norm_stderr\": 0.015218733046150193\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.6473988439306358,\n \"acc_stderr\": 0.02572280220089581,\n \"acc_norm\": 0.6473988439306358,\n \"acc_norm_stderr\": 0.02572280220089581\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.25139664804469275,\n \"acc_stderr\": 0.01450897945355397,\n \"acc_norm\": 0.25139664804469275,\n \"acc_norm_stderr\": 0.01450897945355397\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.6470588235294118,\n \"acc_stderr\": 0.027363593284684965,\n \"acc_norm\": 0.6470588235294118,\n \"acc_norm_stderr\": 0.027363593284684965\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.6591639871382636,\n \"acc_stderr\": 0.026920841260776165,\n \"acc_norm\": 0.6591639871382636,\n \"acc_norm_stderr\": 0.026920841260776165\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.6512345679012346,\n \"acc_stderr\": 0.02651759772446501,\n \"acc_norm\": 0.6512345679012346,\n \"acc_norm_stderr\": 0.02651759772446501\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.42907801418439717,\n \"acc_stderr\": 0.02952591430255856,\n \"acc_norm\": 0.42907801418439717,\n \"acc_norm_stderr\": 0.02952591430255856\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.40547588005215124,\n \"acc_stderr\": 0.012539960672377202,\n \"acc_norm\": 0.40547588005215124,\n \"acc_norm_stderr\": 0.012539960672377202\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.5661764705882353,\n \"acc_stderr\": 0.030105636570016633,\n \"acc_norm\": 0.5661764705882353,\n \"acc_norm_stderr\": 0.030105636570016633\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.5931372549019608,\n \"acc_stderr\": 0.019873802005061177,\n \"acc_norm\": 0.5931372549019608,\n \"acc_norm_stderr\": 0.019873802005061177\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.6272727272727273,\n \"acc_stderr\": 0.04631381319425465,\n \"acc_norm\": 0.6272727272727273,\n \"acc_norm_stderr\": 0.04631381319425465\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.673469387755102,\n \"acc_stderr\": 0.030021056238440307,\n \"acc_norm\": 0.673469387755102,\n \"acc_norm_stderr\": 0.030021056238440307\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.835820895522388,\n \"acc_stderr\": 0.02619392354445414,\n \"acc_norm\": 0.835820895522388,\n \"acc_norm_stderr\": 0.02619392354445414\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.81,\n \"acc_stderr\": 0.03942772444036624,\n \"acc_norm\": 0.81,\n \"acc_norm_stderr\": 0.03942772444036624\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.46987951807228917,\n \"acc_stderr\": 0.03885425420866766,\n \"acc_norm\": 0.46987951807228917,\n \"acc_norm_stderr\": 0.03885425420866766\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.7543859649122807,\n \"acc_stderr\": 0.0330140594698725,\n \"acc_norm\": 0.7543859649122807,\n \"acc_norm_stderr\": 0.0330140594698725\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.29498164014687883,\n \"mc1_stderr\": 0.015964400965589664,\n \"mc2\": 0.45008851442315223,\n \"mc2_stderr\": 0.015144388624059283\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7277032359905288,\n \"acc_stderr\": 0.012510697991453934\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.2266868840030326,\n \"acc_stderr\": 0.011532758009339995\n }\n}\n```", "repo_url": "https://huggingface.co/Sao10K/NyakuraV2.1-m7", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|arc:challenge|25_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|gsm8k|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hellaswag|10_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T04-30-54.576577.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["**/details_harness|winogrande|5_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-12T04-30-54.576577.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_12T04_30_54.576577", "path": ["results_2023-12-12T04-30-54.576577.parquet"]}, {"split": "latest", "path": ["results_2023-12-12T04-30-54.576577.parquet"]}]}]}
2023-12-12T04:34:32+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of Sao10K/NyakuraV2.1-m7 Dataset automatically created during the evaluation run of model Sao10K/NyakuraV2.1-m7 on the Open LLM Leaderboard. The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-12T04:30:54.576577(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Evaluation run of Sao10K/NyakuraV2.1-m7\n\n\n\nDataset automatically created during the evaluation run of model Sao10K/NyakuraV2.1-m7 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T04:30:54.576577(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of Sao10K/NyakuraV2.1-m7\n\n\n\nDataset automatically created during the evaluation run of model Sao10K/NyakuraV2.1-m7 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T04:30:54.576577(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 183, 68, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of Sao10K/NyakuraV2.1-m7\n\n\n\nDataset automatically created during the evaluation run of model Sao10K/NyakuraV2.1-m7 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-12T04:30:54.576577(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
39dee4fe420bcd43979189089a3c8eadceadfb56
# Dataset Card for Evaluation run of l3utterfly/minima-3b-layla-v1 <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [l3utterfly/minima-3b-layla-v1](https://huggingface.co/l3utterfly/minima-3b-layla-v1) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_l3utterfly__minima-3b-layla-v1", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-12T05:22:37.332430](https://huggingface.co/datasets/open-llm-leaderboard/details_l3utterfly__minima-3b-layla-v1/blob/main/results_2023-12-12T05-22-37.332430.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.2926833958149601, "acc_stderr": 0.03204866848858654, "acc_norm": 0.2933238684679158, "acc_norm_stderr": 0.03277522544357182, "mc1": 0.29253365973072215, "mc1_stderr": 0.015925597445286165, "mc2": 0.46464235332296694, "mc2_stderr": 0.01460614608791472 }, "harness|arc:challenge|25": { "acc": 0.39590443686006827, "acc_stderr": 0.014291228393536588, "acc_norm": 0.4232081911262799, "acc_norm_stderr": 0.014438036220848029 }, "harness|hellaswag|10": { "acc": 0.5041824337781319, "acc_stderr": 0.004989606838371069, "acc_norm": 0.6747659828719379, "acc_norm_stderr": 0.004675048151056846 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.23, "acc_stderr": 0.042295258468165065, "acc_norm": 0.23, "acc_norm_stderr": 0.042295258468165065 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.2074074074074074, "acc_stderr": 0.03502553170678318, "acc_norm": 0.2074074074074074, "acc_norm_stderr": 0.03502553170678318 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.2565789473684211, "acc_stderr": 0.0355418036802569, "acc_norm": 0.2565789473684211, "acc_norm_stderr": 0.0355418036802569 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.35, "acc_stderr": 0.047937248544110196, "acc_norm": 0.35, "acc_norm_stderr": 0.047937248544110196 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.27169811320754716, "acc_stderr": 0.027377706624670713, "acc_norm": 0.27169811320754716, "acc_norm_stderr": 0.027377706624670713 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.25, "acc_stderr": 0.03621034121889507, "acc_norm": 0.25, "acc_norm_stderr": 0.03621034121889507 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.3, "acc_stderr": 0.046056618647183814, "acc_norm": 0.3, "acc_norm_stderr": 0.046056618647183814 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.39, "acc_stderr": 0.04902071300001975, "acc_norm": 0.39, "acc_norm_stderr": 0.04902071300001975 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.31, "acc_stderr": 0.04648231987117316, "acc_norm": 0.31, "acc_norm_stderr": 0.04648231987117316 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.23121387283236994, "acc_stderr": 0.03214737302029469, "acc_norm": 0.23121387283236994, "acc_norm_stderr": 0.03214737302029469 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.28431372549019607, "acc_stderr": 0.04488482852329017, "acc_norm": 0.28431372549019607, "acc_norm_stderr": 0.04488482852329017 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.25, "acc_stderr": 0.04351941398892446, "acc_norm": 0.25, "acc_norm_stderr": 0.04351941398892446 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.26382978723404255, "acc_stderr": 0.028809989854102956, "acc_norm": 0.26382978723404255, "acc_norm_stderr": 0.028809989854102956 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.21052631578947367, "acc_stderr": 0.038351539543994194, "acc_norm": 0.21052631578947367, "acc_norm_stderr": 0.038351539543994194 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.2482758620689655, "acc_stderr": 0.036001056927277716, "acc_norm": 0.2482758620689655, "acc_norm_stderr": 0.036001056927277716 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.20899470899470898, "acc_stderr": 0.02094048156533485, "acc_norm": 0.20899470899470898, "acc_norm_stderr": 0.02094048156533485 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.2777777777777778, "acc_stderr": 0.04006168083848877, "acc_norm": 0.2777777777777778, "acc_norm_stderr": 0.04006168083848877 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.3, "acc_stderr": 0.046056618647183814, "acc_norm": 0.3, "acc_norm_stderr": 0.046056618647183814 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.24516129032258063, "acc_stderr": 0.024472243840895528, "acc_norm": 0.24516129032258063, "acc_norm_stderr": 0.024472243840895528 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.2019704433497537, "acc_stderr": 0.02824735012218027, "acc_norm": 0.2019704433497537, "acc_norm_stderr": 0.02824735012218027 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.21, "acc_stderr": 0.040936018074033256, "acc_norm": 0.21, "acc_norm_stderr": 0.040936018074033256 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.2727272727272727, "acc_stderr": 0.03477691162163659, "acc_norm": 0.2727272727272727, "acc_norm_stderr": 0.03477691162163659 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.25252525252525254, "acc_stderr": 0.030954055470365904, "acc_norm": 0.25252525252525254, "acc_norm_stderr": 0.030954055470365904 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.27461139896373055, "acc_stderr": 0.03221024508041154, "acc_norm": 0.27461139896373055, "acc_norm_stderr": 0.03221024508041154 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.32564102564102565, "acc_stderr": 0.02375966576741229, "acc_norm": 0.32564102564102565, "acc_norm_stderr": 0.02375966576741229 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.26296296296296295, "acc_stderr": 0.026842057873833706, "acc_norm": 0.26296296296296295, "acc_norm_stderr": 0.026842057873833706 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.3445378151260504, "acc_stderr": 0.030868682604121622, "acc_norm": 0.3445378151260504, "acc_norm_stderr": 0.030868682604121622 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.3576158940397351, "acc_stderr": 0.03913453431177258, "acc_norm": 0.3576158940397351, "acc_norm_stderr": 0.03913453431177258 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.30091743119266057, "acc_stderr": 0.019664751366802114, "acc_norm": 0.30091743119266057, "acc_norm_stderr": 0.019664751366802114 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.4212962962962963, "acc_stderr": 0.033674621388960775, "acc_norm": 0.4212962962962963, "acc_norm_stderr": 0.033674621388960775 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.28431372549019607, "acc_stderr": 0.03166009679399813, "acc_norm": 0.28431372549019607, "acc_norm_stderr": 0.03166009679399813 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.27848101265822783, "acc_stderr": 0.029178682304842562, "acc_norm": 0.27848101265822783, "acc_norm_stderr": 0.029178682304842562 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.30493273542600896, "acc_stderr": 0.030898610882477515, "acc_norm": 0.30493273542600896, "acc_norm_stderr": 0.030898610882477515 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.37404580152671757, "acc_stderr": 0.042438692422305246, "acc_norm": 0.37404580152671757, "acc_norm_stderr": 0.042438692422305246 }, "harness|hendrycksTest-international_law|5": { "acc": 0.2644628099173554, "acc_stderr": 0.040261875275912046, "acc_norm": 0.2644628099173554, "acc_norm_stderr": 0.040261875275912046 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.23148148148148148, "acc_stderr": 0.04077494709252627, "acc_norm": 0.23148148148148148, "acc_norm_stderr": 0.04077494709252627 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.2331288343558282, "acc_stderr": 0.03322015795776741, "acc_norm": 0.2331288343558282, "acc_norm_stderr": 0.03322015795776741 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.32142857142857145, "acc_stderr": 0.044328040552915185, "acc_norm": 0.32142857142857145, "acc_norm_stderr": 0.044328040552915185 }, "harness|hendrycksTest-management|5": { "acc": 0.2621359223300971, "acc_stderr": 0.04354631077260595, "acc_norm": 0.2621359223300971, "acc_norm_stderr": 0.04354631077260595 }, "harness|hendrycksTest-marketing|5": { "acc": 0.28205128205128205, "acc_stderr": 0.02948036054954119, "acc_norm": 0.28205128205128205, "acc_norm_stderr": 0.02948036054954119 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.34, "acc_stderr": 0.04760952285695235, "acc_norm": 0.34, "acc_norm_stderr": 0.04760952285695235 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.2720306513409962, "acc_stderr": 0.01591336744750051, "acc_norm": 0.2720306513409962, "acc_norm_stderr": 0.01591336744750051 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.28034682080924855, "acc_stderr": 0.02418242749657761, "acc_norm": 0.28034682080924855, "acc_norm_stderr": 0.02418242749657761 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.2670391061452514, "acc_stderr": 0.014796502622562557, "acc_norm": 0.2670391061452514, "acc_norm_stderr": 0.014796502622562557 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.2908496732026144, "acc_stderr": 0.02600480036395211, "acc_norm": 0.2908496732026144, "acc_norm_stderr": 0.02600480036395211 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.3440514469453376, "acc_stderr": 0.026981478043648015, "acc_norm": 0.3440514469453376, "acc_norm_stderr": 0.026981478043648015 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.23765432098765432, "acc_stderr": 0.023683591837008553, "acc_norm": 0.23765432098765432, "acc_norm_stderr": 0.023683591837008553 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.24113475177304963, "acc_stderr": 0.025518731049537755, "acc_norm": 0.24113475177304963, "acc_norm_stderr": 0.025518731049537755 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.2607561929595828, "acc_stderr": 0.01121347155960232, "acc_norm": 0.2607561929595828, "acc_norm_stderr": 0.01121347155960232 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.38235294117647056, "acc_stderr": 0.029520095697687758, "acc_norm": 0.38235294117647056, "acc_norm_stderr": 0.029520095697687758 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.2761437908496732, "acc_stderr": 0.018087276935663137, "acc_norm": 0.2761437908496732, "acc_norm_stderr": 0.018087276935663137 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.2727272727272727, "acc_stderr": 0.04265792110940588, "acc_norm": 0.2727272727272727, "acc_norm_stderr": 0.04265792110940588 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.4, "acc_stderr": 0.03136250240935893, "acc_norm": 0.4, "acc_norm_stderr": 0.03136250240935893 }, "harness|hendrycksTest-sociology|5": { "acc": 0.2736318407960199, "acc_stderr": 0.03152439186555402, "acc_norm": 0.2736318407960199, "acc_norm_stderr": 0.03152439186555402 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.33, "acc_stderr": 0.047258156262526045, "acc_norm": 0.33, "acc_norm_stderr": 0.047258156262526045 }, "harness|hendrycksTest-virology|5": { "acc": 0.25903614457831325, "acc_stderr": 0.034106466140718564, "acc_norm": 0.25903614457831325, "acc_norm_stderr": 0.034106466140718564 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.3333333333333333, "acc_stderr": 0.03615507630310935, "acc_norm": 0.3333333333333333, "acc_norm_stderr": 0.03615507630310935 }, "harness|truthfulqa:mc|0": { "mc1": 0.29253365973072215, "mc1_stderr": 0.015925597445286165, "mc2": 0.46464235332296694, "mc2_stderr": 0.01460614608791472 }, "harness|winogrande|5": { "acc": 0.659037095501184, "acc_stderr": 0.013322681435934793 }, "harness|gsm8k|5": { "acc": 0.08642911296436695, "acc_stderr": 0.007740044337103812 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
open-llm-leaderboard/details_l3utterfly__minima-3b-layla-v1
[ "region:us" ]
2023-12-12T05:25:37+00:00
{"pretty_name": "Evaluation run of l3utterfly/minima-3b-layla-v1", "dataset_summary": "Dataset automatically created during the evaluation run of model [l3utterfly/minima-3b-layla-v1](https://huggingface.co/l3utterfly/minima-3b-layla-v1) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_l3utterfly__minima-3b-layla-v1\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-12T05:22:37.332430](https://huggingface.co/datasets/open-llm-leaderboard/details_l3utterfly__minima-3b-layla-v1/blob/main/results_2023-12-12T05-22-37.332430.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.2926833958149601,\n \"acc_stderr\": 0.03204866848858654,\n \"acc_norm\": 0.2933238684679158,\n \"acc_norm_stderr\": 0.03277522544357182,\n \"mc1\": 0.29253365973072215,\n \"mc1_stderr\": 0.015925597445286165,\n \"mc2\": 0.46464235332296694,\n \"mc2_stderr\": 0.01460614608791472\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.39590443686006827,\n \"acc_stderr\": 0.014291228393536588,\n \"acc_norm\": 0.4232081911262799,\n \"acc_norm_stderr\": 0.014438036220848029\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.5041824337781319,\n \"acc_stderr\": 0.004989606838371069,\n \"acc_norm\": 0.6747659828719379,\n \"acc_norm_stderr\": 0.004675048151056846\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.23,\n \"acc_stderr\": 0.042295258468165065,\n \"acc_norm\": 0.23,\n \"acc_norm_stderr\": 0.042295258468165065\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.2074074074074074,\n \"acc_stderr\": 0.03502553170678318,\n \"acc_norm\": 0.2074074074074074,\n \"acc_norm_stderr\": 0.03502553170678318\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.2565789473684211,\n \"acc_stderr\": 0.0355418036802569,\n \"acc_norm\": 0.2565789473684211,\n \"acc_norm_stderr\": 0.0355418036802569\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.35,\n \"acc_stderr\": 0.047937248544110196,\n \"acc_norm\": 0.35,\n \"acc_norm_stderr\": 0.047937248544110196\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.27169811320754716,\n \"acc_stderr\": 0.027377706624670713,\n \"acc_norm\": 0.27169811320754716,\n \"acc_norm_stderr\": 0.027377706624670713\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.25,\n \"acc_stderr\": 0.03621034121889507,\n \"acc_norm\": 0.25,\n \"acc_norm_stderr\": 0.03621034121889507\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.3,\n \"acc_stderr\": 0.046056618647183814,\n \"acc_norm\": 0.3,\n \"acc_norm_stderr\": 0.046056618647183814\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.39,\n \"acc_stderr\": 0.04902071300001975,\n \"acc_norm\": 0.39,\n \"acc_norm_stderr\": 0.04902071300001975\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.31,\n \"acc_stderr\": 0.04648231987117316,\n \"acc_norm\": 0.31,\n \"acc_norm_stderr\": 0.04648231987117316\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.23121387283236994,\n \"acc_stderr\": 0.03214737302029469,\n \"acc_norm\": 0.23121387283236994,\n \"acc_norm_stderr\": 0.03214737302029469\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.28431372549019607,\n \"acc_stderr\": 0.04488482852329017,\n \"acc_norm\": 0.28431372549019607,\n \"acc_norm_stderr\": 0.04488482852329017\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.25,\n \"acc_stderr\": 0.04351941398892446,\n \"acc_norm\": 0.25,\n \"acc_norm_stderr\": 0.04351941398892446\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.26382978723404255,\n \"acc_stderr\": 0.028809989854102956,\n \"acc_norm\": 0.26382978723404255,\n \"acc_norm_stderr\": 0.028809989854102956\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.21052631578947367,\n \"acc_stderr\": 0.038351539543994194,\n \"acc_norm\": 0.21052631578947367,\n \"acc_norm_stderr\": 0.038351539543994194\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.2482758620689655,\n \"acc_stderr\": 0.036001056927277716,\n \"acc_norm\": 0.2482758620689655,\n \"acc_norm_stderr\": 0.036001056927277716\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.20899470899470898,\n \"acc_stderr\": 0.02094048156533485,\n \"acc_norm\": 0.20899470899470898,\n \"acc_norm_stderr\": 0.02094048156533485\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.2777777777777778,\n \"acc_stderr\": 0.04006168083848877,\n \"acc_norm\": 0.2777777777777778,\n \"acc_norm_stderr\": 0.04006168083848877\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.3,\n \"acc_stderr\": 0.046056618647183814,\n \"acc_norm\": 0.3,\n \"acc_norm_stderr\": 0.046056618647183814\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.24516129032258063,\n \"acc_stderr\": 0.024472243840895528,\n \"acc_norm\": 0.24516129032258063,\n \"acc_norm_stderr\": 0.024472243840895528\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.2019704433497537,\n \"acc_stderr\": 0.02824735012218027,\n \"acc_norm\": 0.2019704433497537,\n \"acc_norm_stderr\": 0.02824735012218027\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.21,\n \"acc_stderr\": 0.040936018074033256,\n \"acc_norm\": 0.21,\n \"acc_norm_stderr\": 0.040936018074033256\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.2727272727272727,\n \"acc_stderr\": 0.03477691162163659,\n \"acc_norm\": 0.2727272727272727,\n \"acc_norm_stderr\": 0.03477691162163659\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.25252525252525254,\n \"acc_stderr\": 0.030954055470365904,\n \"acc_norm\": 0.25252525252525254,\n \"acc_norm_stderr\": 0.030954055470365904\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.27461139896373055,\n \"acc_stderr\": 0.03221024508041154,\n \"acc_norm\": 0.27461139896373055,\n \"acc_norm_stderr\": 0.03221024508041154\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.32564102564102565,\n \"acc_stderr\": 0.02375966576741229,\n \"acc_norm\": 0.32564102564102565,\n \"acc_norm_stderr\": 0.02375966576741229\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.26296296296296295,\n \"acc_stderr\": 0.026842057873833706,\n \"acc_norm\": 0.26296296296296295,\n \"acc_norm_stderr\": 0.026842057873833706\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.3445378151260504,\n \"acc_stderr\": 0.030868682604121622,\n \"acc_norm\": 0.3445378151260504,\n \"acc_norm_stderr\": 0.030868682604121622\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.3576158940397351,\n \"acc_stderr\": 0.03913453431177258,\n \"acc_norm\": 0.3576158940397351,\n \"acc_norm_stderr\": 0.03913453431177258\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.30091743119266057,\n \"acc_stderr\": 0.019664751366802114,\n \"acc_norm\": 0.30091743119266057,\n \"acc_norm_stderr\": 0.019664751366802114\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.4212962962962963,\n \"acc_stderr\": 0.033674621388960775,\n \"acc_norm\": 0.4212962962962963,\n \"acc_norm_stderr\": 0.033674621388960775\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.28431372549019607,\n \"acc_stderr\": 0.03166009679399813,\n \"acc_norm\": 0.28431372549019607,\n \"acc_norm_stderr\": 0.03166009679399813\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.27848101265822783,\n \"acc_stderr\": 0.029178682304842562,\n \"acc_norm\": 0.27848101265822783,\n \"acc_norm_stderr\": 0.029178682304842562\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.30493273542600896,\n \"acc_stderr\": 0.030898610882477515,\n \"acc_norm\": 0.30493273542600896,\n \"acc_norm_stderr\": 0.030898610882477515\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.37404580152671757,\n \"acc_stderr\": 0.042438692422305246,\n \"acc_norm\": 0.37404580152671757,\n \"acc_norm_stderr\": 0.042438692422305246\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.2644628099173554,\n \"acc_stderr\": 0.040261875275912046,\n \"acc_norm\": 0.2644628099173554,\n \"acc_norm_stderr\": 0.040261875275912046\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.23148148148148148,\n \"acc_stderr\": 0.04077494709252627,\n \"acc_norm\": 0.23148148148148148,\n \"acc_norm_stderr\": 0.04077494709252627\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.2331288343558282,\n \"acc_stderr\": 0.03322015795776741,\n \"acc_norm\": 0.2331288343558282,\n \"acc_norm_stderr\": 0.03322015795776741\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.32142857142857145,\n \"acc_stderr\": 0.044328040552915185,\n \"acc_norm\": 0.32142857142857145,\n \"acc_norm_stderr\": 0.044328040552915185\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.2621359223300971,\n \"acc_stderr\": 0.04354631077260595,\n \"acc_norm\": 0.2621359223300971,\n \"acc_norm_stderr\": 0.04354631077260595\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.28205128205128205,\n \"acc_stderr\": 0.02948036054954119,\n \"acc_norm\": 0.28205128205128205,\n \"acc_norm_stderr\": 0.02948036054954119\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.34,\n \"acc_stderr\": 0.04760952285695235,\n \"acc_norm\": 0.34,\n \"acc_norm_stderr\": 0.04760952285695235\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.2720306513409962,\n \"acc_stderr\": 0.01591336744750051,\n \"acc_norm\": 0.2720306513409962,\n \"acc_norm_stderr\": 0.01591336744750051\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.28034682080924855,\n \"acc_stderr\": 0.02418242749657761,\n \"acc_norm\": 0.28034682080924855,\n \"acc_norm_stderr\": 0.02418242749657761\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.2670391061452514,\n \"acc_stderr\": 0.014796502622562557,\n \"acc_norm\": 0.2670391061452514,\n \"acc_norm_stderr\": 0.014796502622562557\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.2908496732026144,\n \"acc_stderr\": 0.02600480036395211,\n \"acc_norm\": 0.2908496732026144,\n \"acc_norm_stderr\": 0.02600480036395211\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.3440514469453376,\n \"acc_stderr\": 0.026981478043648015,\n \"acc_norm\": 0.3440514469453376,\n \"acc_norm_stderr\": 0.026981478043648015\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.23765432098765432,\n \"acc_stderr\": 0.023683591837008553,\n \"acc_norm\": 0.23765432098765432,\n \"acc_norm_stderr\": 0.023683591837008553\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.24113475177304963,\n \"acc_stderr\": 0.025518731049537755,\n \"acc_norm\": 0.24113475177304963,\n \"acc_norm_stderr\": 0.025518731049537755\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.2607561929595828,\n \"acc_stderr\": 0.01121347155960232,\n \"acc_norm\": 0.2607561929595828,\n \"acc_norm_stderr\": 0.01121347155960232\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.38235294117647056,\n \"acc_stderr\": 0.029520095697687758,\n \"acc_norm\": 0.38235294117647056,\n \"acc_norm_stderr\": 0.029520095697687758\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.2761437908496732,\n \"acc_stderr\": 0.018087276935663137,\n \"acc_norm\": 0.2761437908496732,\n \"acc_norm_stderr\": 0.018087276935663137\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.2727272727272727,\n \"acc_stderr\": 0.04265792110940588,\n \"acc_norm\": 0.2727272727272727,\n \"acc_norm_stderr\": 0.04265792110940588\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.4,\n \"acc_stderr\": 0.03136250240935893,\n \"acc_norm\": 0.4,\n \"acc_norm_stderr\": 0.03136250240935893\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.2736318407960199,\n \"acc_stderr\": 0.03152439186555402,\n \"acc_norm\": 0.2736318407960199,\n \"acc_norm_stderr\": 0.03152439186555402\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.33,\n \"acc_stderr\": 0.047258156262526045,\n \"acc_norm\": 0.33,\n \"acc_norm_stderr\": 0.047258156262526045\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.25903614457831325,\n \"acc_stderr\": 0.034106466140718564,\n \"acc_norm\": 0.25903614457831325,\n \"acc_norm_stderr\": 0.034106466140718564\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.3333333333333333,\n \"acc_stderr\": 0.03615507630310935,\n \"acc_norm\": 0.3333333333333333,\n \"acc_norm_stderr\": 0.03615507630310935\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.29253365973072215,\n \"mc1_stderr\": 0.015925597445286165,\n \"mc2\": 0.46464235332296694,\n \"mc2_stderr\": 0.01460614608791472\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.659037095501184,\n \"acc_stderr\": 0.013322681435934793\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.08642911296436695,\n \"acc_stderr\": 0.007740044337103812\n }\n}\n```", "repo_url": "https://huggingface.co/l3utterfly/minima-3b-layla-v1", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|arc:challenge|25_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|gsm8k|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hellaswag|10_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T05-22-37.332430.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["**/details_harness|winogrande|5_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-12T05-22-37.332430.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_12T05_22_37.332430", "path": ["results_2023-12-12T05-22-37.332430.parquet"]}, {"split": "latest", "path": ["results_2023-12-12T05-22-37.332430.parquet"]}]}]}
2023-12-12T05:26:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of l3utterfly/minima-3b-layla-v1 Dataset automatically created during the evaluation run of model l3utterfly/minima-3b-layla-v1 on the Open LLM Leaderboard. The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-12T05:22:37.332430(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Evaluation run of l3utterfly/minima-3b-layla-v1\n\n\n\nDataset automatically created during the evaluation run of model l3utterfly/minima-3b-layla-v1 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T05:22:37.332430(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of l3utterfly/minima-3b-layla-v1\n\n\n\nDataset automatically created during the evaluation run of model l3utterfly/minima-3b-layla-v1 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T05:22:37.332430(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 193, 67, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of l3utterfly/minima-3b-layla-v1\n\n\n\nDataset automatically created during the evaluation run of model l3utterfly/minima-3b-layla-v1 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-12T05:22:37.332430(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]" ]
638d96bba5aa253884d2967bd224c7a0a7d29b20
# Dataset Card for Evaluation run of janhq/supermario-v2 <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [janhq/supermario-v2](https://huggingface.co/janhq/supermario-v2) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_janhq__supermario-v2", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-12T05:33:32.497051](https://huggingface.co/datasets/open-llm-leaderboard/details_janhq__supermario-v2/blob/main/results_2023-12-12T05-33-32.497051.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.6533206964078709, "acc_stderr": 0.03205268515858169, "acc_norm": 0.6531030767387064, "acc_norm_stderr": 0.03271825548664744, "mc1": 0.44430844553243576, "mc1_stderr": 0.017394586250743173, "mc2": 0.605797177274584, "mc2_stderr": 0.015128279082831566 }, "harness|arc:challenge|25": { "acc": 0.6578498293515358, "acc_stderr": 0.013864152159177275, "acc_norm": 0.6851535836177475, "acc_norm_stderr": 0.013572657703084948 }, "harness|hellaswag|10": { "acc": 0.6763592909778928, "acc_stderr": 0.004669085411342194, "acc_norm": 0.8650667197769368, "acc_norm_stderr": 0.0034095405332498414 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.33, "acc_stderr": 0.04725815626252605, "acc_norm": 0.33, "acc_norm_stderr": 0.04725815626252605 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.6296296296296297, "acc_stderr": 0.041716541613545426, "acc_norm": 0.6296296296296297, "acc_norm_stderr": 0.041716541613545426 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.6907894736842105, "acc_stderr": 0.037610708698674805, "acc_norm": 0.6907894736842105, "acc_norm_stderr": 0.037610708698674805 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.63, "acc_stderr": 0.04852365870939099, "acc_norm": 0.63, "acc_norm_stderr": 0.04852365870939099 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.7056603773584905, "acc_stderr": 0.02804918631569525, "acc_norm": 0.7056603773584905, "acc_norm_stderr": 0.02804918631569525 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.7847222222222222, "acc_stderr": 0.03437079344106135, "acc_norm": 0.7847222222222222, "acc_norm_stderr": 0.03437079344106135 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.46, "acc_stderr": 0.05009082659620333, "acc_norm": 0.46, "acc_norm_stderr": 0.05009082659620333 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.53, "acc_stderr": 0.050161355804659205, "acc_norm": 0.53, "acc_norm_stderr": 0.050161355804659205 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.35, "acc_stderr": 0.047937248544110196, "acc_norm": 0.35, "acc_norm_stderr": 0.047937248544110196 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.6647398843930635, "acc_stderr": 0.03599586301247077, "acc_norm": 0.6647398843930635, "acc_norm_stderr": 0.03599586301247077 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.45098039215686275, "acc_stderr": 0.04951218252396264, "acc_norm": 0.45098039215686275, "acc_norm_stderr": 0.04951218252396264 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.78, "acc_stderr": 0.04163331998932263, "acc_norm": 0.78, "acc_norm_stderr": 0.04163331998932263 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.5957446808510638, "acc_stderr": 0.03208115750788684, "acc_norm": 0.5957446808510638, "acc_norm_stderr": 0.03208115750788684 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.4824561403508772, "acc_stderr": 0.04700708033551038, "acc_norm": 0.4824561403508772, "acc_norm_stderr": 0.04700708033551038 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.5241379310344828, "acc_stderr": 0.0416180850350153, "acc_norm": 0.5241379310344828, "acc_norm_stderr": 0.0416180850350153 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.42857142857142855, "acc_stderr": 0.02548718714785938, "acc_norm": 0.42857142857142855, "acc_norm_stderr": 0.02548718714785938 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.4365079365079365, "acc_stderr": 0.04435932892851466, "acc_norm": 0.4365079365079365, "acc_norm_stderr": 0.04435932892851466 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.35, "acc_stderr": 0.047937248544110196, "acc_norm": 0.35, "acc_norm_stderr": 0.047937248544110196 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.7741935483870968, "acc_stderr": 0.023785577884181015, "acc_norm": 0.7741935483870968, "acc_norm_stderr": 0.023785577884181015 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.47783251231527096, "acc_stderr": 0.03514528562175008, "acc_norm": 0.47783251231527096, "acc_norm_stderr": 0.03514528562175008 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.71, "acc_stderr": 0.045604802157206845, "acc_norm": 0.71, "acc_norm_stderr": 0.045604802157206845 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.7757575757575758, "acc_stderr": 0.03256866661681102, "acc_norm": 0.7757575757575758, "acc_norm_stderr": 0.03256866661681102 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.7929292929292929, "acc_stderr": 0.028869778460267045, "acc_norm": 0.7929292929292929, "acc_norm_stderr": 0.028869778460267045 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.9015544041450777, "acc_stderr": 0.021500249576033456, "acc_norm": 0.9015544041450777, "acc_norm_stderr": 0.021500249576033456 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.6717948717948717, "acc_stderr": 0.023807633198657266, "acc_norm": 0.6717948717948717, "acc_norm_stderr": 0.023807633198657266 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.36666666666666664, "acc_stderr": 0.029381620726465066, "acc_norm": 0.36666666666666664, "acc_norm_stderr": 0.029381620726465066 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.6848739495798319, "acc_stderr": 0.030176808288974337, "acc_norm": 0.6848739495798319, "acc_norm_stderr": 0.030176808288974337 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.3443708609271523, "acc_stderr": 0.038796870240733264, "acc_norm": 0.3443708609271523, "acc_norm_stderr": 0.038796870240733264 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.8550458715596331, "acc_stderr": 0.01509421569970048, "acc_norm": 0.8550458715596331, "acc_norm_stderr": 0.01509421569970048 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.5509259259259259, "acc_stderr": 0.03392238405321617, "acc_norm": 0.5509259259259259, "acc_norm_stderr": 0.03392238405321617 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.8137254901960784, "acc_stderr": 0.027325470966716312, "acc_norm": 0.8137254901960784, "acc_norm_stderr": 0.027325470966716312 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.7974683544303798, "acc_stderr": 0.026160568246601432, "acc_norm": 0.7974683544303798, "acc_norm_stderr": 0.026160568246601432 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.6905829596412556, "acc_stderr": 0.03102441174057221, "acc_norm": 0.6905829596412556, "acc_norm_stderr": 0.03102441174057221 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.8015267175572519, "acc_stderr": 0.03498149385462472, "acc_norm": 0.8015267175572519, "acc_norm_stderr": 0.03498149385462472 }, "harness|hendrycksTest-international_law|5": { "acc": 0.8099173553719008, "acc_stderr": 0.03581796951709282, "acc_norm": 0.8099173553719008, "acc_norm_stderr": 0.03581796951709282 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.7777777777777778, "acc_stderr": 0.040191074725573483, "acc_norm": 0.7777777777777778, "acc_norm_stderr": 0.040191074725573483 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.7730061349693251, "acc_stderr": 0.03291099578615769, "acc_norm": 0.7730061349693251, "acc_norm_stderr": 0.03291099578615769 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.45535714285714285, "acc_stderr": 0.047268355537191, "acc_norm": 0.45535714285714285, "acc_norm_stderr": 0.047268355537191 }, "harness|hendrycksTest-management|5": { "acc": 0.7572815533980582, "acc_stderr": 0.04245022486384495, "acc_norm": 0.7572815533980582, "acc_norm_stderr": 0.04245022486384495 }, "harness|hendrycksTest-marketing|5": { "acc": 0.8760683760683761, "acc_stderr": 0.021586494001281376, "acc_norm": 0.8760683760683761, "acc_norm_stderr": 0.021586494001281376 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.72, "acc_stderr": 0.04512608598542128, "acc_norm": 0.72, "acc_norm_stderr": 0.04512608598542128 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.8301404853128991, "acc_stderr": 0.013428186370608313, "acc_norm": 0.8301404853128991, "acc_norm_stderr": 0.013428186370608313 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.7312138728323699, "acc_stderr": 0.023868003262500104, "acc_norm": 0.7312138728323699, "acc_norm_stderr": 0.023868003262500104 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.38994413407821227, "acc_stderr": 0.01631237662921307, "acc_norm": 0.38994413407821227, "acc_norm_stderr": 0.01631237662921307 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.7352941176470589, "acc_stderr": 0.025261691219729477, "acc_norm": 0.7352941176470589, "acc_norm_stderr": 0.025261691219729477 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.7138263665594855, "acc_stderr": 0.025670259242188933, "acc_norm": 0.7138263665594855, "acc_norm_stderr": 0.025670259242188933 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.7561728395061729, "acc_stderr": 0.023891879541959607, "acc_norm": 0.7561728395061729, "acc_norm_stderr": 0.023891879541959607 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.48936170212765956, "acc_stderr": 0.029820747191422473, "acc_norm": 0.48936170212765956, "acc_norm_stderr": 0.029820747191422473 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.46936114732724904, "acc_stderr": 0.012746237711716634, "acc_norm": 0.46936114732724904, "acc_norm_stderr": 0.012746237711716634 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.6911764705882353, "acc_stderr": 0.02806499816704009, "acc_norm": 0.6911764705882353, "acc_norm_stderr": 0.02806499816704009 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.6830065359477124, "acc_stderr": 0.018824219512706207, "acc_norm": 0.6830065359477124, "acc_norm_stderr": 0.018824219512706207 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.6727272727272727, "acc_stderr": 0.0449429086625209, "acc_norm": 0.6727272727272727, "acc_norm_stderr": 0.0449429086625209 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.7306122448979592, "acc_stderr": 0.02840125202902294, "acc_norm": 0.7306122448979592, "acc_norm_stderr": 0.02840125202902294 }, "harness|hendrycksTest-sociology|5": { "acc": 0.8606965174129353, "acc_stderr": 0.024484487162913973, "acc_norm": 0.8606965174129353, "acc_norm_stderr": 0.024484487162913973 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.86, "acc_stderr": 0.034873508801977704, "acc_norm": 0.86, "acc_norm_stderr": 0.034873508801977704 }, "harness|hendrycksTest-virology|5": { "acc": 0.5421686746987951, "acc_stderr": 0.0387862677100236, "acc_norm": 0.5421686746987951, "acc_norm_stderr": 0.0387862677100236 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.8245614035087719, "acc_stderr": 0.029170885500727665, "acc_norm": 0.8245614035087719, "acc_norm_stderr": 0.029170885500727665 }, "harness|truthfulqa:mc|0": { "mc1": 0.44430844553243576, "mc1_stderr": 0.017394586250743173, "mc2": 0.605797177274584, "mc2_stderr": 0.015128279082831566 }, "harness|winogrande|5": { "acc": 0.813733228097869, "acc_stderr": 0.010941877955676206 }, "harness|gsm8k|5": { "acc": 0.7217589082638363, "acc_stderr": 0.012343803671422682 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
open-llm-leaderboard/details_janhq__supermario-v2
[ "region:us" ]
2023-12-12T05:36:27+00:00
{"pretty_name": "Evaluation run of janhq/supermario-v2", "dataset_summary": "Dataset automatically created during the evaluation run of model [janhq/supermario-v2](https://huggingface.co/janhq/supermario-v2) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_janhq__supermario-v2\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-12T05:33:32.497051](https://huggingface.co/datasets/open-llm-leaderboard/details_janhq__supermario-v2/blob/main/results_2023-12-12T05-33-32.497051.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.6533206964078709,\n \"acc_stderr\": 0.03205268515858169,\n \"acc_norm\": 0.6531030767387064,\n \"acc_norm_stderr\": 0.03271825548664744,\n \"mc1\": 0.44430844553243576,\n \"mc1_stderr\": 0.017394586250743173,\n \"mc2\": 0.605797177274584,\n \"mc2_stderr\": 0.015128279082831566\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.6578498293515358,\n \"acc_stderr\": 0.013864152159177275,\n \"acc_norm\": 0.6851535836177475,\n \"acc_norm_stderr\": 0.013572657703084948\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6763592909778928,\n \"acc_stderr\": 0.004669085411342194,\n \"acc_norm\": 0.8650667197769368,\n \"acc_norm_stderr\": 0.0034095405332498414\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.33,\n \"acc_stderr\": 0.04725815626252605,\n \"acc_norm\": 0.33,\n \"acc_norm_stderr\": 0.04725815626252605\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.6296296296296297,\n \"acc_stderr\": 0.041716541613545426,\n \"acc_norm\": 0.6296296296296297,\n \"acc_norm_stderr\": 0.041716541613545426\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.6907894736842105,\n \"acc_stderr\": 0.037610708698674805,\n \"acc_norm\": 0.6907894736842105,\n \"acc_norm_stderr\": 0.037610708698674805\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.63,\n \"acc_stderr\": 0.04852365870939099,\n \"acc_norm\": 0.63,\n \"acc_norm_stderr\": 0.04852365870939099\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.7056603773584905,\n \"acc_stderr\": 0.02804918631569525,\n \"acc_norm\": 0.7056603773584905,\n \"acc_norm_stderr\": 0.02804918631569525\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.7847222222222222,\n \"acc_stderr\": 0.03437079344106135,\n \"acc_norm\": 0.7847222222222222,\n \"acc_norm_stderr\": 0.03437079344106135\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.46,\n \"acc_stderr\": 0.05009082659620333,\n \"acc_norm\": 0.46,\n \"acc_norm_stderr\": 0.05009082659620333\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.53,\n \"acc_stderr\": 0.050161355804659205,\n \"acc_norm\": 0.53,\n \"acc_norm_stderr\": 0.050161355804659205\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.35,\n \"acc_stderr\": 0.047937248544110196,\n \"acc_norm\": 0.35,\n \"acc_norm_stderr\": 0.047937248544110196\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.6647398843930635,\n \"acc_stderr\": 0.03599586301247077,\n \"acc_norm\": 0.6647398843930635,\n \"acc_norm_stderr\": 0.03599586301247077\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.45098039215686275,\n \"acc_stderr\": 0.04951218252396264,\n \"acc_norm\": 0.45098039215686275,\n \"acc_norm_stderr\": 0.04951218252396264\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.78,\n \"acc_stderr\": 0.04163331998932263,\n \"acc_norm\": 0.78,\n \"acc_norm_stderr\": 0.04163331998932263\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.5957446808510638,\n \"acc_stderr\": 0.03208115750788684,\n \"acc_norm\": 0.5957446808510638,\n \"acc_norm_stderr\": 0.03208115750788684\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.4824561403508772,\n \"acc_stderr\": 0.04700708033551038,\n \"acc_norm\": 0.4824561403508772,\n \"acc_norm_stderr\": 0.04700708033551038\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.5241379310344828,\n \"acc_stderr\": 0.0416180850350153,\n \"acc_norm\": 0.5241379310344828,\n \"acc_norm_stderr\": 0.0416180850350153\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.42857142857142855,\n \"acc_stderr\": 0.02548718714785938,\n \"acc_norm\": 0.42857142857142855,\n \"acc_norm_stderr\": 0.02548718714785938\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.4365079365079365,\n \"acc_stderr\": 0.04435932892851466,\n \"acc_norm\": 0.4365079365079365,\n \"acc_norm_stderr\": 0.04435932892851466\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.35,\n \"acc_stderr\": 0.047937248544110196,\n \"acc_norm\": 0.35,\n \"acc_norm_stderr\": 0.047937248544110196\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.7741935483870968,\n \"acc_stderr\": 0.023785577884181015,\n \"acc_norm\": 0.7741935483870968,\n \"acc_norm_stderr\": 0.023785577884181015\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.47783251231527096,\n \"acc_stderr\": 0.03514528562175008,\n \"acc_norm\": 0.47783251231527096,\n \"acc_norm_stderr\": 0.03514528562175008\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.71,\n \"acc_stderr\": 0.045604802157206845,\n \"acc_norm\": 0.71,\n \"acc_norm_stderr\": 0.045604802157206845\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.7757575757575758,\n \"acc_stderr\": 0.03256866661681102,\n \"acc_norm\": 0.7757575757575758,\n \"acc_norm_stderr\": 0.03256866661681102\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.7929292929292929,\n \"acc_stderr\": 0.028869778460267045,\n \"acc_norm\": 0.7929292929292929,\n \"acc_norm_stderr\": 0.028869778460267045\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.9015544041450777,\n \"acc_stderr\": 0.021500249576033456,\n \"acc_norm\": 0.9015544041450777,\n \"acc_norm_stderr\": 0.021500249576033456\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.6717948717948717,\n \"acc_stderr\": 0.023807633198657266,\n \"acc_norm\": 0.6717948717948717,\n \"acc_norm_stderr\": 0.023807633198657266\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.36666666666666664,\n \"acc_stderr\": 0.029381620726465066,\n \"acc_norm\": 0.36666666666666664,\n \"acc_norm_stderr\": 0.029381620726465066\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.6848739495798319,\n \"acc_stderr\": 0.030176808288974337,\n \"acc_norm\": 0.6848739495798319,\n \"acc_norm_stderr\": 0.030176808288974337\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.3443708609271523,\n \"acc_stderr\": 0.038796870240733264,\n \"acc_norm\": 0.3443708609271523,\n \"acc_norm_stderr\": 0.038796870240733264\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.8550458715596331,\n \"acc_stderr\": 0.01509421569970048,\n \"acc_norm\": 0.8550458715596331,\n \"acc_norm_stderr\": 0.01509421569970048\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.5509259259259259,\n \"acc_stderr\": 0.03392238405321617,\n \"acc_norm\": 0.5509259259259259,\n \"acc_norm_stderr\": 0.03392238405321617\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.8137254901960784,\n \"acc_stderr\": 0.027325470966716312,\n \"acc_norm\": 0.8137254901960784,\n \"acc_norm_stderr\": 0.027325470966716312\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.7974683544303798,\n \"acc_stderr\": 0.026160568246601432,\n \"acc_norm\": 0.7974683544303798,\n \"acc_norm_stderr\": 0.026160568246601432\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.6905829596412556,\n \"acc_stderr\": 0.03102441174057221,\n \"acc_norm\": 0.6905829596412556,\n \"acc_norm_stderr\": 0.03102441174057221\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.8015267175572519,\n \"acc_stderr\": 0.03498149385462472,\n \"acc_norm\": 0.8015267175572519,\n \"acc_norm_stderr\": 0.03498149385462472\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.8099173553719008,\n \"acc_stderr\": 0.03581796951709282,\n \"acc_norm\": 0.8099173553719008,\n \"acc_norm_stderr\": 0.03581796951709282\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.7777777777777778,\n \"acc_stderr\": 0.040191074725573483,\n \"acc_norm\": 0.7777777777777778,\n \"acc_norm_stderr\": 0.040191074725573483\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.7730061349693251,\n \"acc_stderr\": 0.03291099578615769,\n \"acc_norm\": 0.7730061349693251,\n \"acc_norm_stderr\": 0.03291099578615769\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.45535714285714285,\n \"acc_stderr\": 0.047268355537191,\n \"acc_norm\": 0.45535714285714285,\n \"acc_norm_stderr\": 0.047268355537191\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.7572815533980582,\n \"acc_stderr\": 0.04245022486384495,\n \"acc_norm\": 0.7572815533980582,\n \"acc_norm_stderr\": 0.04245022486384495\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.8760683760683761,\n \"acc_stderr\": 0.021586494001281376,\n \"acc_norm\": 0.8760683760683761,\n \"acc_norm_stderr\": 0.021586494001281376\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.72,\n \"acc_stderr\": 0.04512608598542128,\n \"acc_norm\": 0.72,\n \"acc_norm_stderr\": 0.04512608598542128\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.8301404853128991,\n \"acc_stderr\": 0.013428186370608313,\n \"acc_norm\": 0.8301404853128991,\n \"acc_norm_stderr\": 0.013428186370608313\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.7312138728323699,\n \"acc_stderr\": 0.023868003262500104,\n \"acc_norm\": 0.7312138728323699,\n \"acc_norm_stderr\": 0.023868003262500104\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.38994413407821227,\n \"acc_stderr\": 0.01631237662921307,\n \"acc_norm\": 0.38994413407821227,\n \"acc_norm_stderr\": 0.01631237662921307\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.7352941176470589,\n \"acc_stderr\": 0.025261691219729477,\n \"acc_norm\": 0.7352941176470589,\n \"acc_norm_stderr\": 0.025261691219729477\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.7138263665594855,\n \"acc_stderr\": 0.025670259242188933,\n \"acc_norm\": 0.7138263665594855,\n \"acc_norm_stderr\": 0.025670259242188933\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.7561728395061729,\n \"acc_stderr\": 0.023891879541959607,\n \"acc_norm\": 0.7561728395061729,\n \"acc_norm_stderr\": 0.023891879541959607\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.48936170212765956,\n \"acc_stderr\": 0.029820747191422473,\n \"acc_norm\": 0.48936170212765956,\n \"acc_norm_stderr\": 0.029820747191422473\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.46936114732724904,\n \"acc_stderr\": 0.012746237711716634,\n \"acc_norm\": 0.46936114732724904,\n \"acc_norm_stderr\": 0.012746237711716634\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.6911764705882353,\n \"acc_stderr\": 0.02806499816704009,\n \"acc_norm\": 0.6911764705882353,\n \"acc_norm_stderr\": 0.02806499816704009\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.6830065359477124,\n \"acc_stderr\": 0.018824219512706207,\n \"acc_norm\": 0.6830065359477124,\n \"acc_norm_stderr\": 0.018824219512706207\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.6727272727272727,\n \"acc_stderr\": 0.0449429086625209,\n \"acc_norm\": 0.6727272727272727,\n \"acc_norm_stderr\": 0.0449429086625209\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.7306122448979592,\n \"acc_stderr\": 0.02840125202902294,\n \"acc_norm\": 0.7306122448979592,\n \"acc_norm_stderr\": 0.02840125202902294\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.8606965174129353,\n \"acc_stderr\": 0.024484487162913973,\n \"acc_norm\": 0.8606965174129353,\n \"acc_norm_stderr\": 0.024484487162913973\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.86,\n \"acc_stderr\": 0.034873508801977704,\n \"acc_norm\": 0.86,\n \"acc_norm_stderr\": 0.034873508801977704\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.5421686746987951,\n \"acc_stderr\": 0.0387862677100236,\n \"acc_norm\": 0.5421686746987951,\n \"acc_norm_stderr\": 0.0387862677100236\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.8245614035087719,\n \"acc_stderr\": 0.029170885500727665,\n \"acc_norm\": 0.8245614035087719,\n \"acc_norm_stderr\": 0.029170885500727665\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.44430844553243576,\n \"mc1_stderr\": 0.017394586250743173,\n \"mc2\": 0.605797177274584,\n \"mc2_stderr\": 0.015128279082831566\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.813733228097869,\n \"acc_stderr\": 0.010941877955676206\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.7217589082638363,\n \"acc_stderr\": 0.012343803671422682\n }\n}\n```", "repo_url": "https://huggingface.co/janhq/supermario-v2", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|arc:challenge|25_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|gsm8k|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hellaswag|10_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T05-33-32.497051.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["**/details_harness|winogrande|5_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-12T05-33-32.497051.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_12T05_33_32.497051", "path": ["results_2023-12-12T05-33-32.497051.parquet"]}, {"split": "latest", "path": ["results_2023-12-12T05-33-32.497051.parquet"]}]}]}
2023-12-12T05:37:09+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of janhq/supermario-v2 Dataset automatically created during the evaluation run of model janhq/supermario-v2 on the Open LLM Leaderboard. The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-12T05:33:32.497051(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Evaluation run of janhq/supermario-v2\n\n\n\nDataset automatically created during the evaluation run of model janhq/supermario-v2 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T05:33:32.497051(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of janhq/supermario-v2\n\n\n\nDataset automatically created during the evaluation run of model janhq/supermario-v2 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T05:33:32.497051(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 181, 67, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of janhq/supermario-v2\n\n\n\nDataset automatically created during the evaluation run of model janhq/supermario-v2 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-12T05:33:32.497051(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
e4268a92cc4f9bf89f48b2d231915cc92f9b6034
# Dataset Card for Evaluation run of Undi95/Clover3-17B <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [Undi95/Clover3-17B](https://huggingface.co/Undi95/Clover3-17B) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_Undi95__Clover3-17B", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-12T06:10:19.622221](https://huggingface.co/datasets/open-llm-leaderboard/details_Undi95__Clover3-17B/blob/main/results_2023-12-12T06-10-19.622221.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.600361059236723, "acc_stderr": 0.033074807649830854, "acc_norm": 0.608082187606879, "acc_norm_stderr": 0.03379934177123045, "mc1": 0.26805385556915545, "mc1_stderr": 0.01550620472283456, "mc2": 0.4072173688663445, "mc2_stderr": 0.014502556892504742 }, "harness|arc:challenge|25": { "acc": 0.5656996587030717, "acc_stderr": 0.01448470304885736, "acc_norm": 0.5989761092150171, "acc_norm_stderr": 0.014322255790719867 }, "harness|hellaswag|10": { "acc": 0.6161123282214698, "acc_stderr": 0.004853371646239244, "acc_norm": 0.811790479984067, "acc_norm_stderr": 0.003900805416736722 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.35, "acc_stderr": 0.0479372485441102, "acc_norm": 0.35, "acc_norm_stderr": 0.0479372485441102 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.5777777777777777, "acc_stderr": 0.04266763404099582, "acc_norm": 0.5777777777777777, "acc_norm_stderr": 0.04266763404099582 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.6513157894736842, "acc_stderr": 0.0387813988879761, "acc_norm": 0.6513157894736842, "acc_norm_stderr": 0.0387813988879761 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.58, "acc_stderr": 0.049604496374885836, "acc_norm": 0.58, "acc_norm_stderr": 0.049604496374885836 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.6226415094339622, "acc_stderr": 0.029832808114796005, "acc_norm": 0.6226415094339622, "acc_norm_stderr": 0.029832808114796005 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.7361111111111112, "acc_stderr": 0.03685651095897532, "acc_norm": 0.7361111111111112, "acc_norm_stderr": 0.03685651095897532 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.43, "acc_stderr": 0.049756985195624284, "acc_norm": 0.43, "acc_norm_stderr": 0.049756985195624284 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.5, "acc_stderr": 0.050251890762960605, "acc_norm": 0.5, "acc_norm_stderr": 0.050251890762960605 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.33, "acc_stderr": 0.047258156262526045, "acc_norm": 0.33, "acc_norm_stderr": 0.047258156262526045 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.5953757225433526, "acc_stderr": 0.03742461193887248, "acc_norm": 0.5953757225433526, "acc_norm_stderr": 0.03742461193887248 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.37254901960784315, "acc_stderr": 0.04810840148082636, "acc_norm": 0.37254901960784315, "acc_norm_stderr": 0.04810840148082636 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.72, "acc_stderr": 0.04512608598542128, "acc_norm": 0.72, "acc_norm_stderr": 0.04512608598542128 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.5531914893617021, "acc_stderr": 0.0325005368436584, "acc_norm": 0.5531914893617021, "acc_norm_stderr": 0.0325005368436584 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.45614035087719296, "acc_stderr": 0.046854730419077895, "acc_norm": 0.45614035087719296, "acc_norm_stderr": 0.046854730419077895 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.5241379310344828, "acc_stderr": 0.0416180850350153, "acc_norm": 0.5241379310344828, "acc_norm_stderr": 0.0416180850350153 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.3888888888888889, "acc_stderr": 0.02510742548113729, "acc_norm": 0.3888888888888889, "acc_norm_stderr": 0.02510742548113729 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.3968253968253968, "acc_stderr": 0.043758884927270605, "acc_norm": 0.3968253968253968, "acc_norm_stderr": 0.043758884927270605 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.37, "acc_stderr": 0.04852365870939099, "acc_norm": 0.37, "acc_norm_stderr": 0.04852365870939099 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.7387096774193549, "acc_stderr": 0.024993053397764826, "acc_norm": 0.7387096774193549, "acc_norm_stderr": 0.024993053397764826 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.46798029556650245, "acc_stderr": 0.035107665979592154, "acc_norm": 0.46798029556650245, "acc_norm_stderr": 0.035107665979592154 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.64, "acc_stderr": 0.048241815132442176, "acc_norm": 0.64, "acc_norm_stderr": 0.048241815132442176 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.7333333333333333, "acc_stderr": 0.03453131801885417, "acc_norm": 0.7333333333333333, "acc_norm_stderr": 0.03453131801885417 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.7424242424242424, "acc_stderr": 0.031156269519646836, "acc_norm": 0.7424242424242424, "acc_norm_stderr": 0.031156269519646836 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.844559585492228, "acc_stderr": 0.026148483469153327, "acc_norm": 0.844559585492228, "acc_norm_stderr": 0.026148483469153327 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.6410256410256411, "acc_stderr": 0.024321738484602354, "acc_norm": 0.6410256410256411, "acc_norm_stderr": 0.024321738484602354 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.3296296296296296, "acc_stderr": 0.028661201116524575, "acc_norm": 0.3296296296296296, "acc_norm_stderr": 0.028661201116524575 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.6848739495798319, "acc_stderr": 0.030176808288974337, "acc_norm": 0.6848739495798319, "acc_norm_stderr": 0.030176808288974337 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.32450331125827814, "acc_stderr": 0.03822746937658753, "acc_norm": 0.32450331125827814, "acc_norm_stderr": 0.03822746937658753 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.818348623853211, "acc_stderr": 0.01653061740926687, "acc_norm": 0.818348623853211, "acc_norm_stderr": 0.01653061740926687 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.4675925925925926, "acc_stderr": 0.034028015813589656, "acc_norm": 0.4675925925925926, "acc_norm_stderr": 0.034028015813589656 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.7549019607843137, "acc_stderr": 0.030190282453501954, "acc_norm": 0.7549019607843137, "acc_norm_stderr": 0.030190282453501954 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.7426160337552743, "acc_stderr": 0.028458820991460305, "acc_norm": 0.7426160337552743, "acc_norm_stderr": 0.028458820991460305 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.6771300448430493, "acc_stderr": 0.03138147637575499, "acc_norm": 0.6771300448430493, "acc_norm_stderr": 0.03138147637575499 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.7175572519083969, "acc_stderr": 0.03948406125768361, "acc_norm": 0.7175572519083969, "acc_norm_stderr": 0.03948406125768361 }, "harness|hendrycksTest-international_law|5": { "acc": 0.7355371900826446, "acc_stderr": 0.04026187527591205, "acc_norm": 0.7355371900826446, "acc_norm_stderr": 0.04026187527591205 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.7314814814814815, "acc_stderr": 0.042844679680521934, "acc_norm": 0.7314814814814815, "acc_norm_stderr": 0.042844679680521934 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.6871165644171779, "acc_stderr": 0.036429145782924055, "acc_norm": 0.6871165644171779, "acc_norm_stderr": 0.036429145782924055 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.45535714285714285, "acc_stderr": 0.047268355537191, "acc_norm": 0.45535714285714285, "acc_norm_stderr": 0.047268355537191 }, "harness|hendrycksTest-management|5": { "acc": 0.7766990291262136, "acc_stderr": 0.04123553189891431, "acc_norm": 0.7766990291262136, "acc_norm_stderr": 0.04123553189891431 }, "harness|hendrycksTest-marketing|5": { "acc": 0.8675213675213675, "acc_stderr": 0.022209309073165612, "acc_norm": 0.8675213675213675, "acc_norm_stderr": 0.022209309073165612 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.71, "acc_stderr": 0.045604802157206845, "acc_norm": 0.71, "acc_norm_stderr": 0.045604802157206845 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.80970625798212, "acc_stderr": 0.014036945850381398, "acc_norm": 0.80970625798212, "acc_norm_stderr": 0.014036945850381398 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.630057803468208, "acc_stderr": 0.02599247202930639, "acc_norm": 0.630057803468208, "acc_norm_stderr": 0.02599247202930639 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.20446927374301677, "acc_stderr": 0.013488813404711919, "acc_norm": 0.20446927374301677, "acc_norm_stderr": 0.013488813404711919 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.6928104575163399, "acc_stderr": 0.026415601914388992, "acc_norm": 0.6928104575163399, "acc_norm_stderr": 0.026415601914388992 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.6720257234726688, "acc_stderr": 0.026664410886937624, "acc_norm": 0.6720257234726688, "acc_norm_stderr": 0.026664410886937624 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.6882716049382716, "acc_stderr": 0.02577311116963046, "acc_norm": 0.6882716049382716, "acc_norm_stderr": 0.02577311116963046 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.4148936170212766, "acc_stderr": 0.029392236584612506, "acc_norm": 0.4148936170212766, "acc_norm_stderr": 0.029392236584612506 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.42698826597131684, "acc_stderr": 0.012633353557534425, "acc_norm": 0.42698826597131684, "acc_norm_stderr": 0.012633353557534425 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.6433823529411765, "acc_stderr": 0.02909720956841195, "acc_norm": 0.6433823529411765, "acc_norm_stderr": 0.02909720956841195 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.6176470588235294, "acc_stderr": 0.01965992249362335, "acc_norm": 0.6176470588235294, "acc_norm_stderr": 0.01965992249362335 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.6, "acc_stderr": 0.0469237132203465, "acc_norm": 0.6, "acc_norm_stderr": 0.0469237132203465 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.6448979591836734, "acc_stderr": 0.030635655150387638, "acc_norm": 0.6448979591836734, "acc_norm_stderr": 0.030635655150387638 }, "harness|hendrycksTest-sociology|5": { "acc": 0.7960199004975125, "acc_stderr": 0.02849317624532607, "acc_norm": 0.7960199004975125, "acc_norm_stderr": 0.02849317624532607 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.82, "acc_stderr": 0.038612291966536955, "acc_norm": 0.82, "acc_norm_stderr": 0.038612291966536955 }, "harness|hendrycksTest-virology|5": { "acc": 0.5481927710843374, "acc_stderr": 0.03874371556587953, "acc_norm": 0.5481927710843374, "acc_norm_stderr": 0.03874371556587953 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.8128654970760234, "acc_stderr": 0.02991312723236804, "acc_norm": 0.8128654970760234, "acc_norm_stderr": 0.02991312723236804 }, "harness|truthfulqa:mc|0": { "mc1": 0.26805385556915545, "mc1_stderr": 0.01550620472283456, "mc2": 0.4072173688663445, "mc2_stderr": 0.014502556892504742 }, "harness|winogrande|5": { "acc": 0.7861089187056038, "acc_stderr": 0.011524466954090259 }, "harness|gsm8k|5": { "acc": 0.18802122820318423, "acc_stderr": 0.010762621695354888 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
open-llm-leaderboard/details_Undi95__Clover3-17B
[ "region:us" ]
2023-12-12T06:13:20+00:00
{"pretty_name": "Evaluation run of Undi95/Clover3-17B", "dataset_summary": "Dataset automatically created during the evaluation run of model [Undi95/Clover3-17B](https://huggingface.co/Undi95/Clover3-17B) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_Undi95__Clover3-17B\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-12T06:10:19.622221](https://huggingface.co/datasets/open-llm-leaderboard/details_Undi95__Clover3-17B/blob/main/results_2023-12-12T06-10-19.622221.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.600361059236723,\n \"acc_stderr\": 0.033074807649830854,\n \"acc_norm\": 0.608082187606879,\n \"acc_norm_stderr\": 0.03379934177123045,\n \"mc1\": 0.26805385556915545,\n \"mc1_stderr\": 0.01550620472283456,\n \"mc2\": 0.4072173688663445,\n \"mc2_stderr\": 0.014502556892504742\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.5656996587030717,\n \"acc_stderr\": 0.01448470304885736,\n \"acc_norm\": 0.5989761092150171,\n \"acc_norm_stderr\": 0.014322255790719867\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6161123282214698,\n \"acc_stderr\": 0.004853371646239244,\n \"acc_norm\": 0.811790479984067,\n \"acc_norm_stderr\": 0.003900805416736722\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.35,\n \"acc_stderr\": 0.0479372485441102,\n \"acc_norm\": 0.35,\n \"acc_norm_stderr\": 0.0479372485441102\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.5777777777777777,\n \"acc_stderr\": 0.04266763404099582,\n \"acc_norm\": 0.5777777777777777,\n \"acc_norm_stderr\": 0.04266763404099582\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.6513157894736842,\n \"acc_stderr\": 0.0387813988879761,\n \"acc_norm\": 0.6513157894736842,\n \"acc_norm_stderr\": 0.0387813988879761\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.58,\n \"acc_stderr\": 0.049604496374885836,\n \"acc_norm\": 0.58,\n \"acc_norm_stderr\": 0.049604496374885836\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.6226415094339622,\n \"acc_stderr\": 0.029832808114796005,\n \"acc_norm\": 0.6226415094339622,\n \"acc_norm_stderr\": 0.029832808114796005\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.7361111111111112,\n \"acc_stderr\": 0.03685651095897532,\n \"acc_norm\": 0.7361111111111112,\n \"acc_norm_stderr\": 0.03685651095897532\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.43,\n \"acc_stderr\": 0.049756985195624284,\n \"acc_norm\": 0.43,\n \"acc_norm_stderr\": 0.049756985195624284\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.5,\n \"acc_stderr\": 0.050251890762960605,\n \"acc_norm\": 0.5,\n \"acc_norm_stderr\": 0.050251890762960605\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.33,\n \"acc_stderr\": 0.047258156262526045,\n \"acc_norm\": 0.33,\n \"acc_norm_stderr\": 0.047258156262526045\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.5953757225433526,\n \"acc_stderr\": 0.03742461193887248,\n \"acc_norm\": 0.5953757225433526,\n \"acc_norm_stderr\": 0.03742461193887248\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.37254901960784315,\n \"acc_stderr\": 0.04810840148082636,\n \"acc_norm\": 0.37254901960784315,\n \"acc_norm_stderr\": 0.04810840148082636\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.72,\n \"acc_stderr\": 0.04512608598542128,\n \"acc_norm\": 0.72,\n \"acc_norm_stderr\": 0.04512608598542128\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.5531914893617021,\n \"acc_stderr\": 0.0325005368436584,\n \"acc_norm\": 0.5531914893617021,\n \"acc_norm_stderr\": 0.0325005368436584\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.45614035087719296,\n \"acc_stderr\": 0.046854730419077895,\n \"acc_norm\": 0.45614035087719296,\n \"acc_norm_stderr\": 0.046854730419077895\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.5241379310344828,\n \"acc_stderr\": 0.0416180850350153,\n \"acc_norm\": 0.5241379310344828,\n \"acc_norm_stderr\": 0.0416180850350153\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.3888888888888889,\n \"acc_stderr\": 0.02510742548113729,\n \"acc_norm\": 0.3888888888888889,\n \"acc_norm_stderr\": 0.02510742548113729\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.3968253968253968,\n \"acc_stderr\": 0.043758884927270605,\n \"acc_norm\": 0.3968253968253968,\n \"acc_norm_stderr\": 0.043758884927270605\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.37,\n \"acc_stderr\": 0.04852365870939099,\n \"acc_norm\": 0.37,\n \"acc_norm_stderr\": 0.04852365870939099\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.7387096774193549,\n \"acc_stderr\": 0.024993053397764826,\n \"acc_norm\": 0.7387096774193549,\n \"acc_norm_stderr\": 0.024993053397764826\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.46798029556650245,\n \"acc_stderr\": 0.035107665979592154,\n \"acc_norm\": 0.46798029556650245,\n \"acc_norm_stderr\": 0.035107665979592154\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.64,\n \"acc_stderr\": 0.048241815132442176,\n \"acc_norm\": 0.64,\n \"acc_norm_stderr\": 0.048241815132442176\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.7333333333333333,\n \"acc_stderr\": 0.03453131801885417,\n \"acc_norm\": 0.7333333333333333,\n \"acc_norm_stderr\": 0.03453131801885417\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.7424242424242424,\n \"acc_stderr\": 0.031156269519646836,\n \"acc_norm\": 0.7424242424242424,\n \"acc_norm_stderr\": 0.031156269519646836\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.844559585492228,\n \"acc_stderr\": 0.026148483469153327,\n \"acc_norm\": 0.844559585492228,\n \"acc_norm_stderr\": 0.026148483469153327\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.6410256410256411,\n \"acc_stderr\": 0.024321738484602354,\n \"acc_norm\": 0.6410256410256411,\n \"acc_norm_stderr\": 0.024321738484602354\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.3296296296296296,\n \"acc_stderr\": 0.028661201116524575,\n \"acc_norm\": 0.3296296296296296,\n \"acc_norm_stderr\": 0.028661201116524575\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.6848739495798319,\n \"acc_stderr\": 0.030176808288974337,\n \"acc_norm\": 0.6848739495798319,\n \"acc_norm_stderr\": 0.030176808288974337\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.32450331125827814,\n \"acc_stderr\": 0.03822746937658753,\n \"acc_norm\": 0.32450331125827814,\n \"acc_norm_stderr\": 0.03822746937658753\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.818348623853211,\n \"acc_stderr\": 0.01653061740926687,\n \"acc_norm\": 0.818348623853211,\n \"acc_norm_stderr\": 0.01653061740926687\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.4675925925925926,\n \"acc_stderr\": 0.034028015813589656,\n \"acc_norm\": 0.4675925925925926,\n \"acc_norm_stderr\": 0.034028015813589656\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.7549019607843137,\n \"acc_stderr\": 0.030190282453501954,\n \"acc_norm\": 0.7549019607843137,\n \"acc_norm_stderr\": 0.030190282453501954\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.7426160337552743,\n \"acc_stderr\": 0.028458820991460305,\n \"acc_norm\": 0.7426160337552743,\n \"acc_norm_stderr\": 0.028458820991460305\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.6771300448430493,\n \"acc_stderr\": 0.03138147637575499,\n \"acc_norm\": 0.6771300448430493,\n \"acc_norm_stderr\": 0.03138147637575499\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.7175572519083969,\n \"acc_stderr\": 0.03948406125768361,\n \"acc_norm\": 0.7175572519083969,\n \"acc_norm_stderr\": 0.03948406125768361\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.7355371900826446,\n \"acc_stderr\": 0.04026187527591205,\n \"acc_norm\": 0.7355371900826446,\n \"acc_norm_stderr\": 0.04026187527591205\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.7314814814814815,\n \"acc_stderr\": 0.042844679680521934,\n \"acc_norm\": 0.7314814814814815,\n \"acc_norm_stderr\": 0.042844679680521934\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.6871165644171779,\n \"acc_stderr\": 0.036429145782924055,\n \"acc_norm\": 0.6871165644171779,\n \"acc_norm_stderr\": 0.036429145782924055\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.45535714285714285,\n \"acc_stderr\": 0.047268355537191,\n \"acc_norm\": 0.45535714285714285,\n \"acc_norm_stderr\": 0.047268355537191\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.7766990291262136,\n \"acc_stderr\": 0.04123553189891431,\n \"acc_norm\": 0.7766990291262136,\n \"acc_norm_stderr\": 0.04123553189891431\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.8675213675213675,\n \"acc_stderr\": 0.022209309073165612,\n \"acc_norm\": 0.8675213675213675,\n \"acc_norm_stderr\": 0.022209309073165612\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.71,\n \"acc_stderr\": 0.045604802157206845,\n \"acc_norm\": 0.71,\n \"acc_norm_stderr\": 0.045604802157206845\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.80970625798212,\n \"acc_stderr\": 0.014036945850381398,\n \"acc_norm\": 0.80970625798212,\n \"acc_norm_stderr\": 0.014036945850381398\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.630057803468208,\n \"acc_stderr\": 0.02599247202930639,\n \"acc_norm\": 0.630057803468208,\n \"acc_norm_stderr\": 0.02599247202930639\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.20446927374301677,\n \"acc_stderr\": 0.013488813404711919,\n \"acc_norm\": 0.20446927374301677,\n \"acc_norm_stderr\": 0.013488813404711919\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.6928104575163399,\n \"acc_stderr\": 0.026415601914388992,\n \"acc_norm\": 0.6928104575163399,\n \"acc_norm_stderr\": 0.026415601914388992\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.6720257234726688,\n \"acc_stderr\": 0.026664410886937624,\n \"acc_norm\": 0.6720257234726688,\n \"acc_norm_stderr\": 0.026664410886937624\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.6882716049382716,\n \"acc_stderr\": 0.02577311116963046,\n \"acc_norm\": 0.6882716049382716,\n \"acc_norm_stderr\": 0.02577311116963046\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.4148936170212766,\n \"acc_stderr\": 0.029392236584612506,\n \"acc_norm\": 0.4148936170212766,\n \"acc_norm_stderr\": 0.029392236584612506\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.42698826597131684,\n \"acc_stderr\": 0.012633353557534425,\n \"acc_norm\": 0.42698826597131684,\n \"acc_norm_stderr\": 0.012633353557534425\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.6433823529411765,\n \"acc_stderr\": 0.02909720956841195,\n \"acc_norm\": 0.6433823529411765,\n \"acc_norm_stderr\": 0.02909720956841195\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.6176470588235294,\n \"acc_stderr\": 0.01965992249362335,\n \"acc_norm\": 0.6176470588235294,\n \"acc_norm_stderr\": 0.01965992249362335\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.6,\n \"acc_stderr\": 0.0469237132203465,\n \"acc_norm\": 0.6,\n \"acc_norm_stderr\": 0.0469237132203465\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.6448979591836734,\n \"acc_stderr\": 0.030635655150387638,\n \"acc_norm\": 0.6448979591836734,\n \"acc_norm_stderr\": 0.030635655150387638\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.7960199004975125,\n \"acc_stderr\": 0.02849317624532607,\n \"acc_norm\": 0.7960199004975125,\n \"acc_norm_stderr\": 0.02849317624532607\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.82,\n \"acc_stderr\": 0.038612291966536955,\n \"acc_norm\": 0.82,\n \"acc_norm_stderr\": 0.038612291966536955\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.5481927710843374,\n \"acc_stderr\": 0.03874371556587953,\n \"acc_norm\": 0.5481927710843374,\n \"acc_norm_stderr\": 0.03874371556587953\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.8128654970760234,\n \"acc_stderr\": 0.02991312723236804,\n \"acc_norm\": 0.8128654970760234,\n \"acc_norm_stderr\": 0.02991312723236804\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.26805385556915545,\n \"mc1_stderr\": 0.01550620472283456,\n \"mc2\": 0.4072173688663445,\n \"mc2_stderr\": 0.014502556892504742\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7861089187056038,\n \"acc_stderr\": 0.011524466954090259\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.18802122820318423,\n \"acc_stderr\": 0.010762621695354888\n }\n}\n```", "repo_url": "https://huggingface.co/Undi95/Clover3-17B", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|arc:challenge|25_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|gsm8k|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hellaswag|10_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T06-10-19.622221.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["**/details_harness|winogrande|5_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-12T06-10-19.622221.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_12T06_10_19.622221", "path": ["results_2023-12-12T06-10-19.622221.parquet"]}, {"split": "latest", "path": ["results_2023-12-12T06-10-19.622221.parquet"]}]}]}
2023-12-12T06:14:14+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of Undi95/Clover3-17B Dataset automatically created during the evaluation run of model Undi95/Clover3-17B on the Open LLM Leaderboard. The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-12T06:10:19.622221(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Evaluation run of Undi95/Clover3-17B\n\n\n\nDataset automatically created during the evaluation run of model Undi95/Clover3-17B on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T06:10:19.622221(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of Undi95/Clover3-17B\n\n\n\nDataset automatically created during the evaluation run of model Undi95/Clover3-17B on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T06:10:19.622221(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 181, 67, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of Undi95/Clover3-17B\n\n\n\nDataset automatically created during the evaluation run of model Undi95/Clover3-17B on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-12T06:10:19.622221(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
e9a16df610a5beef5a2538a28dbbcb6a3e6e6c41
# Dataset Card for Evaluation run of Sao10K/Venomia-1.1-m7 <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [Sao10K/Venomia-1.1-m7](https://huggingface.co/Sao10K/Venomia-1.1-m7) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_Sao10K__Venomia-1.1-m7", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-12T06:19:54.268543](https://huggingface.co/datasets/open-llm-leaderboard/details_Sao10K__Venomia-1.1-m7/blob/main/results_2023-12-12T06-19-54.268543.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.5646939992249473, "acc_stderr": 0.03365626984219371, "acc_norm": 0.5687314107018308, "acc_norm_stderr": 0.03434566057163248, "mc1": 0.31946144430844553, "mc1_stderr": 0.0163226441829605, "mc2": 0.47211906040617, "mc2_stderr": 0.015511830880546584 }, "harness|arc:challenge|25": { "acc": 0.5563139931740614, "acc_stderr": 0.014518421825670442, "acc_norm": 0.5844709897610921, "acc_norm_stderr": 0.014401366641216384 }, "harness|hellaswag|10": { "acc": 0.6445927106154152, "acc_stderr": 0.004776583530909567, "acc_norm": 0.8304122684724159, "acc_norm_stderr": 0.0037450326672282797 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.31, "acc_stderr": 0.04648231987117316, "acc_norm": 0.31, "acc_norm_stderr": 0.04648231987117316 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.5555555555555556, "acc_stderr": 0.04292596718256981, "acc_norm": 0.5555555555555556, "acc_norm_stderr": 0.04292596718256981 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.6052631578947368, "acc_stderr": 0.039777499346220734, "acc_norm": 0.6052631578947368, "acc_norm_stderr": 0.039777499346220734 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.51, "acc_stderr": 0.05024183937956912, "acc_norm": 0.51, "acc_norm_stderr": 0.05024183937956912 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.5547169811320755, "acc_stderr": 0.03058805297427065, "acc_norm": 0.5547169811320755, "acc_norm_stderr": 0.03058805297427065 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.6041666666666666, "acc_stderr": 0.04089465449325582, "acc_norm": 0.6041666666666666, "acc_norm_stderr": 0.04089465449325582 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.39, "acc_stderr": 0.04902071300001974, "acc_norm": 0.39, "acc_norm_stderr": 0.04902071300001974 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.49, "acc_stderr": 0.05024183937956912, "acc_norm": 0.49, "acc_norm_stderr": 0.05024183937956912 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.28, "acc_stderr": 0.045126085985421276, "acc_norm": 0.28, "acc_norm_stderr": 0.045126085985421276 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.5549132947976878, "acc_stderr": 0.03789401760283647, "acc_norm": 0.5549132947976878, "acc_norm_stderr": 0.03789401760283647 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.30392156862745096, "acc_stderr": 0.045766654032077615, "acc_norm": 0.30392156862745096, "acc_norm_stderr": 0.045766654032077615 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.76, "acc_stderr": 0.042923469599092816, "acc_norm": 0.76, "acc_norm_stderr": 0.042923469599092816 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.4851063829787234, "acc_stderr": 0.032671518489247764, "acc_norm": 0.4851063829787234, "acc_norm_stderr": 0.032671518489247764 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.41228070175438597, "acc_stderr": 0.046306532033665956, "acc_norm": 0.41228070175438597, "acc_norm_stderr": 0.046306532033665956 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.5517241379310345, "acc_stderr": 0.04144311810878151, "acc_norm": 0.5517241379310345, "acc_norm_stderr": 0.04144311810878151 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.37566137566137564, "acc_stderr": 0.02494236893115979, "acc_norm": 0.37566137566137564, "acc_norm_stderr": 0.02494236893115979 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.3333333333333333, "acc_stderr": 0.042163702135578345, "acc_norm": 0.3333333333333333, "acc_norm_stderr": 0.042163702135578345 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.41, "acc_stderr": 0.049431107042371025, "acc_norm": 0.41, "acc_norm_stderr": 0.049431107042371025 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.6548387096774193, "acc_stderr": 0.027045746573534327, "acc_norm": 0.6548387096774193, "acc_norm_stderr": 0.027045746573534327 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.4630541871921182, "acc_stderr": 0.035083705204426656, "acc_norm": 0.4630541871921182, "acc_norm_stderr": 0.035083705204426656 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.6, "acc_stderr": 0.049236596391733084, "acc_norm": 0.6, "acc_norm_stderr": 0.049236596391733084 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.6848484848484848, "acc_stderr": 0.0362773057502241, "acc_norm": 0.6848484848484848, "acc_norm_stderr": 0.0362773057502241 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.6767676767676768, "acc_stderr": 0.033322999210706444, "acc_norm": 0.6767676767676768, "acc_norm_stderr": 0.033322999210706444 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.772020725388601, "acc_stderr": 0.03027690994517826, "acc_norm": 0.772020725388601, "acc_norm_stderr": 0.03027690994517826 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.4897435897435897, "acc_stderr": 0.025345672221942374, "acc_norm": 0.4897435897435897, "acc_norm_stderr": 0.025345672221942374 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.2962962962962963, "acc_stderr": 0.02784081149587192, "acc_norm": 0.2962962962962963, "acc_norm_stderr": 0.02784081149587192 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.5168067226890757, "acc_stderr": 0.03246013680375308, "acc_norm": 0.5168067226890757, "acc_norm_stderr": 0.03246013680375308 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.3509933774834437, "acc_stderr": 0.03896981964257375, "acc_norm": 0.3509933774834437, "acc_norm_stderr": 0.03896981964257375 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.7155963302752294, "acc_stderr": 0.019342036587702574, "acc_norm": 0.7155963302752294, "acc_norm_stderr": 0.019342036587702574 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.32407407407407407, "acc_stderr": 0.03191923445686185, "acc_norm": 0.32407407407407407, "acc_norm_stderr": 0.03191923445686185 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.7009803921568627, "acc_stderr": 0.03213325717373617, "acc_norm": 0.7009803921568627, "acc_norm_stderr": 0.03213325717373617 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.7130801687763713, "acc_stderr": 0.029443773022594693, "acc_norm": 0.7130801687763713, "acc_norm_stderr": 0.029443773022594693 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.6547085201793722, "acc_stderr": 0.03191100192835794, "acc_norm": 0.6547085201793722, "acc_norm_stderr": 0.03191100192835794 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.7099236641221374, "acc_stderr": 0.03980066246467765, "acc_norm": 0.7099236641221374, "acc_norm_stderr": 0.03980066246467765 }, "harness|hendrycksTest-international_law|5": { "acc": 0.7024793388429752, "acc_stderr": 0.04173349148083499, "acc_norm": 0.7024793388429752, "acc_norm_stderr": 0.04173349148083499 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.6388888888888888, "acc_stderr": 0.04643454608906276, "acc_norm": 0.6388888888888888, "acc_norm_stderr": 0.04643454608906276 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.7116564417177914, "acc_stderr": 0.035590395316173425, "acc_norm": 0.7116564417177914, "acc_norm_stderr": 0.035590395316173425 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.4107142857142857, "acc_stderr": 0.046695106638751906, "acc_norm": 0.4107142857142857, "acc_norm_stderr": 0.046695106638751906 }, "harness|hendrycksTest-management|5": { "acc": 0.6310679611650486, "acc_stderr": 0.0477761518115674, "acc_norm": 0.6310679611650486, "acc_norm_stderr": 0.0477761518115674 }, "harness|hendrycksTest-marketing|5": { "acc": 0.8504273504273504, "acc_stderr": 0.023365051491753715, "acc_norm": 0.8504273504273504, "acc_norm_stderr": 0.023365051491753715 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.66, "acc_stderr": 0.04760952285695237, "acc_norm": 0.66, "acc_norm_stderr": 0.04760952285695237 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.756066411238825, "acc_stderr": 0.015357212665829463, "acc_norm": 0.756066411238825, "acc_norm_stderr": 0.015357212665829463 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.6127167630057804, "acc_stderr": 0.026226158605124655, "acc_norm": 0.6127167630057804, "acc_norm_stderr": 0.026226158605124655 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.28268156424581004, "acc_stderr": 0.015060381730018115, "acc_norm": 0.28268156424581004, "acc_norm_stderr": 0.015060381730018115 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.6437908496732027, "acc_stderr": 0.027420477662629235, "acc_norm": 0.6437908496732027, "acc_norm_stderr": 0.027420477662629235 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.6077170418006431, "acc_stderr": 0.02773125864701199, "acc_norm": 0.6077170418006431, "acc_norm_stderr": 0.02773125864701199 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.6481481481481481, "acc_stderr": 0.026571483480719974, "acc_norm": 0.6481481481481481, "acc_norm_stderr": 0.026571483480719974 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.4148936170212766, "acc_stderr": 0.0293922365846125, "acc_norm": 0.4148936170212766, "acc_norm_stderr": 0.0293922365846125 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.4028683181225554, "acc_stderr": 0.012526955577118014, "acc_norm": 0.4028683181225554, "acc_norm_stderr": 0.012526955577118014 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.48161764705882354, "acc_stderr": 0.03035230339535196, "acc_norm": 0.48161764705882354, "acc_norm_stderr": 0.03035230339535196 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.5898692810457516, "acc_stderr": 0.019898412717635903, "acc_norm": 0.5898692810457516, "acc_norm_stderr": 0.019898412717635903 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.6363636363636364, "acc_stderr": 0.04607582090719976, "acc_norm": 0.6363636363636364, "acc_norm_stderr": 0.04607582090719976 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.6979591836734694, "acc_stderr": 0.0293936093198798, "acc_norm": 0.6979591836734694, "acc_norm_stderr": 0.0293936093198798 }, "harness|hendrycksTest-sociology|5": { "acc": 0.8308457711442786, "acc_stderr": 0.02650859065623326, "acc_norm": 0.8308457711442786, "acc_norm_stderr": 0.02650859065623326 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.83, "acc_stderr": 0.03775251680686371, "acc_norm": 0.83, "acc_norm_stderr": 0.03775251680686371 }, "harness|hendrycksTest-virology|5": { "acc": 0.4879518072289157, "acc_stderr": 0.0389136449583582, "acc_norm": 0.4879518072289157, "acc_norm_stderr": 0.0389136449583582 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.8011695906432749, "acc_stderr": 0.030611116557432528, "acc_norm": 0.8011695906432749, "acc_norm_stderr": 0.030611116557432528 }, "harness|truthfulqa:mc|0": { "mc1": 0.31946144430844553, "mc1_stderr": 0.0163226441829605, "mc2": 0.47211906040617, "mc2_stderr": 0.015511830880546584 }, "harness|winogrande|5": { "acc": 0.744277821625888, "acc_stderr": 0.012261253845440473 }, "harness|gsm8k|5": { "acc": 0.36087945413191813, "acc_stderr": 0.013228626753925136 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
open-llm-leaderboard/details_Sao10K__Venomia-1.1-m7
[ "region:us" ]
2023-12-12T06:22:45+00:00
{"pretty_name": "Evaluation run of Sao10K/Venomia-1.1-m7", "dataset_summary": "Dataset automatically created during the evaluation run of model [Sao10K/Venomia-1.1-m7](https://huggingface.co/Sao10K/Venomia-1.1-m7) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_Sao10K__Venomia-1.1-m7\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-12T06:19:54.268543](https://huggingface.co/datasets/open-llm-leaderboard/details_Sao10K__Venomia-1.1-m7/blob/main/results_2023-12-12T06-19-54.268543.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.5646939992249473,\n \"acc_stderr\": 0.03365626984219371,\n \"acc_norm\": 0.5687314107018308,\n \"acc_norm_stderr\": 0.03434566057163248,\n \"mc1\": 0.31946144430844553,\n \"mc1_stderr\": 0.0163226441829605,\n \"mc2\": 0.47211906040617,\n \"mc2_stderr\": 0.015511830880546584\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.5563139931740614,\n \"acc_stderr\": 0.014518421825670442,\n \"acc_norm\": 0.5844709897610921,\n \"acc_norm_stderr\": 0.014401366641216384\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6445927106154152,\n \"acc_stderr\": 0.004776583530909567,\n \"acc_norm\": 0.8304122684724159,\n \"acc_norm_stderr\": 0.0037450326672282797\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.31,\n \"acc_stderr\": 0.04648231987117316,\n \"acc_norm\": 0.31,\n \"acc_norm_stderr\": 0.04648231987117316\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.5555555555555556,\n \"acc_stderr\": 0.04292596718256981,\n \"acc_norm\": 0.5555555555555556,\n \"acc_norm_stderr\": 0.04292596718256981\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.6052631578947368,\n \"acc_stderr\": 0.039777499346220734,\n \"acc_norm\": 0.6052631578947368,\n \"acc_norm_stderr\": 0.039777499346220734\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.51,\n \"acc_stderr\": 0.05024183937956912,\n \"acc_norm\": 0.51,\n \"acc_norm_stderr\": 0.05024183937956912\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.5547169811320755,\n \"acc_stderr\": 0.03058805297427065,\n \"acc_norm\": 0.5547169811320755,\n \"acc_norm_stderr\": 0.03058805297427065\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.6041666666666666,\n \"acc_stderr\": 0.04089465449325582,\n \"acc_norm\": 0.6041666666666666,\n \"acc_norm_stderr\": 0.04089465449325582\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.39,\n \"acc_stderr\": 0.04902071300001974,\n \"acc_norm\": 0.39,\n \"acc_norm_stderr\": 0.04902071300001974\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.49,\n \"acc_stderr\": 0.05024183937956912,\n \"acc_norm\": 0.49,\n \"acc_norm_stderr\": 0.05024183937956912\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.28,\n \"acc_stderr\": 0.045126085985421276,\n \"acc_norm\": 0.28,\n \"acc_norm_stderr\": 0.045126085985421276\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.5549132947976878,\n \"acc_stderr\": 0.03789401760283647,\n \"acc_norm\": 0.5549132947976878,\n \"acc_norm_stderr\": 0.03789401760283647\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.30392156862745096,\n \"acc_stderr\": 0.045766654032077615,\n \"acc_norm\": 0.30392156862745096,\n \"acc_norm_stderr\": 0.045766654032077615\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.76,\n \"acc_stderr\": 0.042923469599092816,\n \"acc_norm\": 0.76,\n \"acc_norm_stderr\": 0.042923469599092816\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.4851063829787234,\n \"acc_stderr\": 0.032671518489247764,\n \"acc_norm\": 0.4851063829787234,\n \"acc_norm_stderr\": 0.032671518489247764\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.41228070175438597,\n \"acc_stderr\": 0.046306532033665956,\n \"acc_norm\": 0.41228070175438597,\n \"acc_norm_stderr\": 0.046306532033665956\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.5517241379310345,\n \"acc_stderr\": 0.04144311810878151,\n \"acc_norm\": 0.5517241379310345,\n \"acc_norm_stderr\": 0.04144311810878151\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.37566137566137564,\n \"acc_stderr\": 0.02494236893115979,\n \"acc_norm\": 0.37566137566137564,\n \"acc_norm_stderr\": 0.02494236893115979\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.3333333333333333,\n \"acc_stderr\": 0.042163702135578345,\n \"acc_norm\": 0.3333333333333333,\n \"acc_norm_stderr\": 0.042163702135578345\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.41,\n \"acc_stderr\": 0.049431107042371025,\n \"acc_norm\": 0.41,\n \"acc_norm_stderr\": 0.049431107042371025\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.6548387096774193,\n \"acc_stderr\": 0.027045746573534327,\n \"acc_norm\": 0.6548387096774193,\n \"acc_norm_stderr\": 0.027045746573534327\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.4630541871921182,\n \"acc_stderr\": 0.035083705204426656,\n \"acc_norm\": 0.4630541871921182,\n \"acc_norm_stderr\": 0.035083705204426656\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.6,\n \"acc_stderr\": 0.049236596391733084,\n \"acc_norm\": 0.6,\n \"acc_norm_stderr\": 0.049236596391733084\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.6848484848484848,\n \"acc_stderr\": 0.0362773057502241,\n \"acc_norm\": 0.6848484848484848,\n \"acc_norm_stderr\": 0.0362773057502241\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.6767676767676768,\n \"acc_stderr\": 0.033322999210706444,\n \"acc_norm\": 0.6767676767676768,\n \"acc_norm_stderr\": 0.033322999210706444\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.772020725388601,\n \"acc_stderr\": 0.03027690994517826,\n \"acc_norm\": 0.772020725388601,\n \"acc_norm_stderr\": 0.03027690994517826\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.4897435897435897,\n \"acc_stderr\": 0.025345672221942374,\n \"acc_norm\": 0.4897435897435897,\n \"acc_norm_stderr\": 0.025345672221942374\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.2962962962962963,\n \"acc_stderr\": 0.02784081149587192,\n \"acc_norm\": 0.2962962962962963,\n \"acc_norm_stderr\": 0.02784081149587192\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.5168067226890757,\n \"acc_stderr\": 0.03246013680375308,\n \"acc_norm\": 0.5168067226890757,\n \"acc_norm_stderr\": 0.03246013680375308\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.3509933774834437,\n \"acc_stderr\": 0.03896981964257375,\n \"acc_norm\": 0.3509933774834437,\n \"acc_norm_stderr\": 0.03896981964257375\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.7155963302752294,\n \"acc_stderr\": 0.019342036587702574,\n \"acc_norm\": 0.7155963302752294,\n \"acc_norm_stderr\": 0.019342036587702574\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.32407407407407407,\n \"acc_stderr\": 0.03191923445686185,\n \"acc_norm\": 0.32407407407407407,\n \"acc_norm_stderr\": 0.03191923445686185\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.7009803921568627,\n \"acc_stderr\": 0.03213325717373617,\n \"acc_norm\": 0.7009803921568627,\n \"acc_norm_stderr\": 0.03213325717373617\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.7130801687763713,\n \"acc_stderr\": 0.029443773022594693,\n \"acc_norm\": 0.7130801687763713,\n \"acc_norm_stderr\": 0.029443773022594693\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.6547085201793722,\n \"acc_stderr\": 0.03191100192835794,\n \"acc_norm\": 0.6547085201793722,\n \"acc_norm_stderr\": 0.03191100192835794\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.7099236641221374,\n \"acc_stderr\": 0.03980066246467765,\n \"acc_norm\": 0.7099236641221374,\n \"acc_norm_stderr\": 0.03980066246467765\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.7024793388429752,\n \"acc_stderr\": 0.04173349148083499,\n \"acc_norm\": 0.7024793388429752,\n \"acc_norm_stderr\": 0.04173349148083499\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.6388888888888888,\n \"acc_stderr\": 0.04643454608906276,\n \"acc_norm\": 0.6388888888888888,\n \"acc_norm_stderr\": 0.04643454608906276\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.7116564417177914,\n \"acc_stderr\": 0.035590395316173425,\n \"acc_norm\": 0.7116564417177914,\n \"acc_norm_stderr\": 0.035590395316173425\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.4107142857142857,\n \"acc_stderr\": 0.046695106638751906,\n \"acc_norm\": 0.4107142857142857,\n \"acc_norm_stderr\": 0.046695106638751906\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.6310679611650486,\n \"acc_stderr\": 0.0477761518115674,\n \"acc_norm\": 0.6310679611650486,\n \"acc_norm_stderr\": 0.0477761518115674\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.8504273504273504,\n \"acc_stderr\": 0.023365051491753715,\n \"acc_norm\": 0.8504273504273504,\n \"acc_norm_stderr\": 0.023365051491753715\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.66,\n \"acc_stderr\": 0.04760952285695237,\n \"acc_norm\": 0.66,\n \"acc_norm_stderr\": 0.04760952285695237\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.756066411238825,\n \"acc_stderr\": 0.015357212665829463,\n \"acc_norm\": 0.756066411238825,\n \"acc_norm_stderr\": 0.015357212665829463\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.6127167630057804,\n \"acc_stderr\": 0.026226158605124655,\n \"acc_norm\": 0.6127167630057804,\n \"acc_norm_stderr\": 0.026226158605124655\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.28268156424581004,\n \"acc_stderr\": 0.015060381730018115,\n \"acc_norm\": 0.28268156424581004,\n \"acc_norm_stderr\": 0.015060381730018115\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.6437908496732027,\n \"acc_stderr\": 0.027420477662629235,\n \"acc_norm\": 0.6437908496732027,\n \"acc_norm_stderr\": 0.027420477662629235\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.6077170418006431,\n \"acc_stderr\": 0.02773125864701199,\n \"acc_norm\": 0.6077170418006431,\n \"acc_norm_stderr\": 0.02773125864701199\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.6481481481481481,\n \"acc_stderr\": 0.026571483480719974,\n \"acc_norm\": 0.6481481481481481,\n \"acc_norm_stderr\": 0.026571483480719974\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.4148936170212766,\n \"acc_stderr\": 0.0293922365846125,\n \"acc_norm\": 0.4148936170212766,\n \"acc_norm_stderr\": 0.0293922365846125\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.4028683181225554,\n \"acc_stderr\": 0.012526955577118014,\n \"acc_norm\": 0.4028683181225554,\n \"acc_norm_stderr\": 0.012526955577118014\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.48161764705882354,\n \"acc_stderr\": 0.03035230339535196,\n \"acc_norm\": 0.48161764705882354,\n \"acc_norm_stderr\": 0.03035230339535196\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.5898692810457516,\n \"acc_stderr\": 0.019898412717635903,\n \"acc_norm\": 0.5898692810457516,\n \"acc_norm_stderr\": 0.019898412717635903\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.6363636363636364,\n \"acc_stderr\": 0.04607582090719976,\n \"acc_norm\": 0.6363636363636364,\n \"acc_norm_stderr\": 0.04607582090719976\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.6979591836734694,\n \"acc_stderr\": 0.0293936093198798,\n \"acc_norm\": 0.6979591836734694,\n \"acc_norm_stderr\": 0.0293936093198798\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.8308457711442786,\n \"acc_stderr\": 0.02650859065623326,\n \"acc_norm\": 0.8308457711442786,\n \"acc_norm_stderr\": 0.02650859065623326\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.83,\n \"acc_stderr\": 0.03775251680686371,\n \"acc_norm\": 0.83,\n \"acc_norm_stderr\": 0.03775251680686371\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.4879518072289157,\n \"acc_stderr\": 0.0389136449583582,\n \"acc_norm\": 0.4879518072289157,\n \"acc_norm_stderr\": 0.0389136449583582\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.8011695906432749,\n \"acc_stderr\": 0.030611116557432528,\n \"acc_norm\": 0.8011695906432749,\n \"acc_norm_stderr\": 0.030611116557432528\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.31946144430844553,\n \"mc1_stderr\": 0.0163226441829605,\n \"mc2\": 0.47211906040617,\n \"mc2_stderr\": 0.015511830880546584\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.744277821625888,\n \"acc_stderr\": 0.012261253845440473\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.36087945413191813,\n \"acc_stderr\": 0.013228626753925136\n }\n}\n```", "repo_url": "https://huggingface.co/Sao10K/Venomia-1.1-m7", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|arc:challenge|25_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|gsm8k|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hellaswag|10_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T06-19-54.268543.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["**/details_harness|winogrande|5_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-12T06-19-54.268543.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_12T06_19_54.268543", "path": ["results_2023-12-12T06-19-54.268543.parquet"]}, {"split": "latest", "path": ["results_2023-12-12T06-19-54.268543.parquet"]}]}]}
2023-12-12T06:23:33+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of Sao10K/Venomia-1.1-m7 Dataset automatically created during the evaluation run of model Sao10K/Venomia-1.1-m7 on the Open LLM Leaderboard. The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-12T06:19:54.268543(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Evaluation run of Sao10K/Venomia-1.1-m7\n\n\n\nDataset automatically created during the evaluation run of model Sao10K/Venomia-1.1-m7 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T06:19:54.268543(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of Sao10K/Venomia-1.1-m7\n\n\n\nDataset automatically created during the evaluation run of model Sao10K/Venomia-1.1-m7 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T06:19:54.268543(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 183, 67, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of Sao10K/Venomia-1.1-m7\n\n\n\nDataset automatically created during the evaluation run of model Sao10K/Venomia-1.1-m7 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-12T06:19:54.268543(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
727781eada2f4a6bf2416aa676c0d8a80a018325
A big shout out to AllenAI, you guys rock! 从[WildChat](https://huggingface.co/datasets/allenai/WildChat)中抽出中文对话,但是因为发现了很多重复对话,有的人会反复的用一个prompt进行提问,有的人会换3.5或4去问同样的问题,所以进行了简单的去重。 去重方法大致为,使用[bert-base-chinese](https://huggingface.co/bert-base-chinese)将第一个问题转换为embedding,使用[类knn的方法](https://arxiv.org/pdf/1708.00489.pdf)抽取了1万条。并转换成了sharegpt格式。 注意!在对话中发现了NSFW的内容,并没有进行过滤,使用请注意甄别。 你会找到三个jsonl文件: * wildchat-seed-multi-200.json 是使用每一个单独的Dialogue的首个HumanQuestion为基础,采样的200个种子任务,用于EvolInsturction。 * Subsample_10K.jsonl 原始版本,是使用每一个单独的Dialogue的首个HumanQuestion为基础,采样的1万个对话。 * 1213_Wildchat_zh_Sharegpt_ConcatSubsample_20k.jsonl 更新,是使用每一个单独的Dialogue的所有HumanQuestion concat起来为基础,采样的2万个对话。(这么做是考虑有不少问题起手就是你好,但是从第二轮开始问题还是很有价值的。 当从原始的10万条中文,仅抽取1000条的时候,embedding tsne visualization: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6413d7be996b2e426f230fb7/XUqllUlGcgY42bD1IYmVJ.png) Wildchat和Moss003有什么区别呢?如图,wildchat20k(红色),moss30K(绿色)。可以看到大概的趋势是Moss只占了Wildchat的一部分语义空间。 ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6413d7be996b2e426f230fb7/k0KnGcO95kHYVPKCNOJ7t.png)
lorinma/Slim-Wildchat-zh
[ "task_categories:conversational", "task_categories:text-generation", "size_categories:10K<n<100K", "language:zh", "arxiv:1708.00489", "region:us" ]
2023-12-12T06:26:19+00:00
{"language": ["zh"], "size_categories": ["10K<n<100K"], "task_categories": ["conversational", "text-generation"]}
2023-12-20T06:29:18+00:00
[ "1708.00489" ]
[ "zh" ]
TAGS #task_categories-conversational #task_categories-text-generation #size_categories-10K<n<100K #language-Chinese #arxiv-1708.00489 #region-us
A big shout out to AllenAI, you guys rock! 从WildChat中抽出中文对话,但是因为发现了很多重复对话,有的人会反复的用一个prompt进行提问,有的人会换3.5或4去问同样的问题,所以进行了简单的去重。 去重方法大致为,使用bert-base-chinese将第一个问题转换为embedding,使用类knn的方法抽取了1万条。并转换成了sharegpt格式。 注意!在对话中发现了NSFW的内容,并没有进行过滤,使用请注意甄别。 你会找到三个jsonl文件: * URL 是使用每一个单独的Dialogue的首个HumanQuestion为基础,采样的200个种子任务,用于EvolInsturction。 * Subsample_10K.jsonl 原始版本,是使用每一个单独的Dialogue的首个HumanQuestion为基础,采样的1万个对话。 * 1213_Wildchat_zh_Sharegpt_ConcatSubsample_20k.jsonl 更新,是使用每一个单独的Dialogue的所有HumanQuestion concat起来为基础,采样的2万个对话。(这么做是考虑有不少问题起手就是你好,但是从第二轮开始问题还是很有价值的。 当从原始的10万条中文,仅抽取1000条的时候,embedding tsne visualization: !image/png Wildchat和Moss003有什么区别呢?如图,wildchat20k(红色),moss30K(绿色)。可以看到大概的趋势是Moss只占了Wildchat的一部分语义空间。 !image/png
[]
[ "TAGS\n#task_categories-conversational #task_categories-text-generation #size_categories-10K<n<100K #language-Chinese #arxiv-1708.00489 #region-us \n" ]
[ 52 ]
[ "passage: TAGS\n#task_categories-conversational #task_categories-text-generation #size_categories-10K<n<100K #language-Chinese #arxiv-1708.00489 #region-us \n" ]
9673fd811dd8419c6c03a90ae1979e24631de853
# Dataset Card for Financial Fraud Labeled Dataset <!-- Provide a quick summary of the dataset. --> ## Dataset Details This dataset collects financial filings from various companies submitted to the U.S. Securities and Exchange Commission (SEC). The dataset consists of 85 companies involved in fraudulent cases and an equal number of companies not involved in fraudulent activities. The Fillings column includes information such as the company's MD&A, and financial statement over the years the company stated on the SEC website. This dataset was used for research in detecting financial fraud using multiple LLMs and traditional machine-learning models. - **Curated by:** [Amit Kedia](https://www.linkedin.com/in/theamitkedia/) - **Language(s) (NLP):** English - **License:** Apache 2.0 ### Dataset Sources - **Repository:** [GitHub](https://github.com/amitkedia007/Financial-Fraud-Detection-Using-LLMs) - **Thesis:** [Financial Fraud Detection using LLMs](https://github.com/amitkedia007/Financial-Fraud-Detection-Using-LLMs/blob/main/Detailed_Report_on_financial_fraud_detection.pdf) ### Direct Use <!-- This section describes suitable use cases for the dataset. --> #### Code to Directly use the dataset: from datasets import load_dataset dataset = load_dataset("amitkedia/Financial-Fraud-Dataset") ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> There are some limitations of the dataset: 1. This dataset is designed for acedemic research 2. The text needs to be cleaned for further process 3. The dataset does not cover all the fradulent cases and are limited to Securities and Exchange Commision of USA (SEC) that means the fradulent and non fradulent cases are the companies of USA ## Dataset Structure For the structure of the dataset look into the dataset viewer. ## Dataset Creation Check out the Thesis ### Curation Rationale <!-- Motivation for the creation of this dataset. --> To help the financial industry develop the best model to detect fraudulent activities which can save billions of dollars for government and banks #### Data Collection and Processing Please Refer to the Thesis ## Dataset Card Authors [Amit Kedia](https://www.linkedin.com/in/theamitkedia/)
amitkedia/Financial-Fraud-Dataset
[ "task_categories:text-classification", "size_categories:1K<n<10K", "language:en", "license:apache-2.0", "finance", "region:us" ]
2023-12-12T06:31:55+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"], "tags": ["finance"]}
2023-12-19T14:17:46+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #size_categories-1K<n<10K #language-English #license-apache-2.0 #finance #region-us
# Dataset Card for Financial Fraud Labeled Dataset ## Dataset Details This dataset collects financial filings from various companies submitted to the U.S. Securities and Exchange Commission (SEC). The dataset consists of 85 companies involved in fraudulent cases and an equal number of companies not involved in fraudulent activities. The Fillings column includes information such as the company's MD&A, and financial statement over the years the company stated on the SEC website. This dataset was used for research in detecting financial fraud using multiple LLMs and traditional machine-learning models. - Curated by: Amit Kedia - Language(s) (NLP): English - License: Apache 2.0 ### Dataset Sources - Repository: GitHub - Thesis: Financial Fraud Detection using LLMs ### Direct Use #### Code to Directly use the dataset: from datasets import load_dataset dataset = load_dataset("amitkedia/Financial-Fraud-Dataset") ### Out-of-Scope Use There are some limitations of the dataset: 1. This dataset is designed for acedemic research 2. The text needs to be cleaned for further process 3. The dataset does not cover all the fradulent cases and are limited to Securities and Exchange Commision of USA (SEC) that means the fradulent and non fradulent cases are the companies of USA ## Dataset Structure For the structure of the dataset look into the dataset viewer. ## Dataset Creation Check out the Thesis ### Curation Rationale To help the financial industry develop the best model to detect fraudulent activities which can save billions of dollars for government and banks #### Data Collection and Processing Please Refer to the Thesis ## Dataset Card Authors Amit Kedia
[ "# Dataset Card for Financial Fraud Labeled Dataset", "## Dataset Details\n\nThis dataset collects financial filings from various companies submitted to the U.S. Securities and Exchange Commission (SEC). The dataset consists of 85 companies involved in fraudulent cases and an equal number of companies not involved in fraudulent activities. The Fillings column includes information such as the company's MD&A, and financial statement over the years the company stated on the SEC website.\n\nThis dataset was used for research in detecting financial fraud using multiple LLMs and traditional machine-learning models.\n\n- Curated by: Amit Kedia\n- Language(s) (NLP): English\n- License: Apache 2.0", "### Dataset Sources\n\n- Repository: GitHub\n- Thesis: Financial Fraud Detection using LLMs", "### Direct Use", "#### Code to Directly use the dataset:\n\nfrom datasets import load_dataset\n\ndataset = load_dataset(\"amitkedia/Financial-Fraud-Dataset\")", "### Out-of-Scope Use\n\n\nThere are some limitations of the dataset:\n1. This dataset is designed for acedemic research \n2. The text needs to be cleaned for further process\n3. The dataset does not cover all the fradulent cases and are limited to Securities and Exchange Commision of USA (SEC) that means the fradulent and non fradulent cases are the companies of USA", "## Dataset Structure\n\nFor the structure of the dataset look into the dataset viewer.", "## Dataset Creation\n\nCheck out the Thesis", "### Curation Rationale\n\n\n\nTo help the financial industry develop the best model to detect fraudulent activities which can save billions of dollars for government and banks", "#### Data Collection and Processing\n\nPlease Refer to the Thesis", "## Dataset Card Authors\n\nAmit Kedia" ]
[ "TAGS\n#task_categories-text-classification #size_categories-1K<n<10K #language-English #license-apache-2.0 #finance #region-us \n", "# Dataset Card for Financial Fraud Labeled Dataset", "## Dataset Details\n\nThis dataset collects financial filings from various companies submitted to the U.S. Securities and Exchange Commission (SEC). The dataset consists of 85 companies involved in fraudulent cases and an equal number of companies not involved in fraudulent activities. The Fillings column includes information such as the company's MD&A, and financial statement over the years the company stated on the SEC website.\n\nThis dataset was used for research in detecting financial fraud using multiple LLMs and traditional machine-learning models.\n\n- Curated by: Amit Kedia\n- Language(s) (NLP): English\n- License: Apache 2.0", "### Dataset Sources\n\n- Repository: GitHub\n- Thesis: Financial Fraud Detection using LLMs", "### Direct Use", "#### Code to Directly use the dataset:\n\nfrom datasets import load_dataset\n\ndataset = load_dataset(\"amitkedia/Financial-Fraud-Dataset\")", "### Out-of-Scope Use\n\n\nThere are some limitations of the dataset:\n1. This dataset is designed for acedemic research \n2. The text needs to be cleaned for further process\n3. The dataset does not cover all the fradulent cases and are limited to Securities and Exchange Commision of USA (SEC) that means the fradulent and non fradulent cases are the companies of USA", "## Dataset Structure\n\nFor the structure of the dataset look into the dataset viewer.", "## Dataset Creation\n\nCheck out the Thesis", "### Curation Rationale\n\n\n\nTo help the financial industry develop the best model to detect fraudulent activities which can save billions of dollars for government and banks", "#### Data Collection and Processing\n\nPlease Refer to the Thesis", "## Dataset Card Authors\n\nAmit Kedia" ]
[ 44, 12, 141, 27, 4, 42, 88, 21, 10, 34, 13, 9 ]
[ "passage: TAGS\n#task_categories-text-classification #size_categories-1K<n<10K #language-English #license-apache-2.0 #finance #region-us \n# Dataset Card for Financial Fraud Labeled Dataset## Dataset Details\n\nThis dataset collects financial filings from various companies submitted to the U.S. Securities and Exchange Commission (SEC). The dataset consists of 85 companies involved in fraudulent cases and an equal number of companies not involved in fraudulent activities. The Fillings column includes information such as the company's MD&A, and financial statement over the years the company stated on the SEC website.\n\nThis dataset was used for research in detecting financial fraud using multiple LLMs and traditional machine-learning models.\n\n- Curated by: Amit Kedia\n- Language(s) (NLP): English\n- License: Apache 2.0### Dataset Sources\n\n- Repository: GitHub\n- Thesis: Financial Fraud Detection using LLMs### Direct Use#### Code to Directly use the dataset:\n\nfrom datasets import load_dataset\n\ndataset = load_dataset(\"amitkedia/Financial-Fraud-Dataset\")### Out-of-Scope Use\n\n\nThere are some limitations of the dataset:\n1. This dataset is designed for acedemic research \n2. The text needs to be cleaned for further process\n3. The dataset does not cover all the fradulent cases and are limited to Securities and Exchange Commision of USA (SEC) that means the fradulent and non fradulent cases are the companies of USA## Dataset Structure\n\nFor the structure of the dataset look into the dataset viewer.## Dataset Creation\n\nCheck out the Thesis### Curation Rationale\n\n\n\nTo help the financial industry develop the best model to detect fraudulent activities which can save billions of dollars for government and banks#### Data Collection and Processing\n\nPlease Refer to the Thesis## Dataset Card Authors\n\nAmit Kedia" ]
cbf63918eb06139b74f9bcc1b3c473fbb7321121
# Dataset Card for Evaluation run of AA051610/A12P <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [AA051610/A12P](https://huggingface.co/AA051610/A12P) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_AA051610__A12P", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-12T06:57:37.769631](https://huggingface.co/datasets/open-llm-leaderboard/details_AA051610__A12P/blob/main/results_2023-12-12T06-57-37.769631.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.6960316046766869, "acc_stderr": 0.030603322312658773, "acc_norm": 0.7008390046754603, "acc_norm_stderr": 0.031195836387238177, "mc1": 0.4565483476132191, "mc1_stderr": 0.01743728095318369, "mc2": 0.6222123218100973, "mc2_stderr": 0.015308802289329178 }, "harness|arc:challenge|25": { "acc": 0.6254266211604096, "acc_stderr": 0.014144193471893454, "acc_norm": 0.64419795221843, "acc_norm_stderr": 0.01399057113791876 }, "harness|hellaswag|10": { "acc": 0.6210914160525791, "acc_stderr": 0.004841238763529372, "acc_norm": 0.8232423819956184, "acc_norm_stderr": 0.0038068384481617415 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.32, "acc_stderr": 0.046882617226215034, "acc_norm": 0.32, "acc_norm_stderr": 0.046882617226215034 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.6074074074074074, "acc_stderr": 0.04218506215368879, "acc_norm": 0.6074074074074074, "acc_norm_stderr": 0.04218506215368879 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.8355263157894737, "acc_stderr": 0.030167533468632723, "acc_norm": 0.8355263157894737, "acc_norm_stderr": 0.030167533468632723 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.77, "acc_stderr": 0.04229525846816505, "acc_norm": 0.77, "acc_norm_stderr": 0.04229525846816505 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.7320754716981132, "acc_stderr": 0.027257260322494845, "acc_norm": 0.7320754716981132, "acc_norm_stderr": 0.027257260322494845 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.7847222222222222, "acc_stderr": 0.034370793441061344, "acc_norm": 0.7847222222222222, "acc_norm_stderr": 0.034370793441061344 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.44, "acc_stderr": 0.049888765156985884, "acc_norm": 0.44, "acc_norm_stderr": 0.049888765156985884 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.59, "acc_stderr": 0.04943110704237101, "acc_norm": 0.59, "acc_norm_stderr": 0.04943110704237101 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.39, "acc_stderr": 0.04902071300001974, "acc_norm": 0.39, "acc_norm_stderr": 0.04902071300001974 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.6242774566473989, "acc_stderr": 0.036928207672648664, "acc_norm": 0.6242774566473989, "acc_norm_stderr": 0.036928207672648664 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.45098039215686275, "acc_stderr": 0.04951218252396264, "acc_norm": 0.45098039215686275, "acc_norm_stderr": 0.04951218252396264 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.81, "acc_stderr": 0.039427724440366234, "acc_norm": 0.81, "acc_norm_stderr": 0.039427724440366234 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.7319148936170212, "acc_stderr": 0.028957342788342343, "acc_norm": 0.7319148936170212, "acc_norm_stderr": 0.028957342788342343 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.5, "acc_stderr": 0.047036043419179864, "acc_norm": 0.5, "acc_norm_stderr": 0.047036043419179864 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.7103448275862069, "acc_stderr": 0.03780019230438015, "acc_norm": 0.7103448275862069, "acc_norm_stderr": 0.03780019230438015 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.5714285714285714, "acc_stderr": 0.025487187147859375, "acc_norm": 0.5714285714285714, "acc_norm_stderr": 0.025487187147859375 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.46825396825396826, "acc_stderr": 0.04463112720677171, "acc_norm": 0.46825396825396826, "acc_norm_stderr": 0.04463112720677171 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.46, "acc_stderr": 0.05009082659620332, "acc_norm": 0.46, "acc_norm_stderr": 0.05009082659620332 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.8483870967741935, "acc_stderr": 0.02040261665441676, "acc_norm": 0.8483870967741935, "acc_norm_stderr": 0.02040261665441676 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.5566502463054187, "acc_stderr": 0.03495334582162934, "acc_norm": 0.5566502463054187, "acc_norm_stderr": 0.03495334582162934 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.73, "acc_stderr": 0.044619604333847394, "acc_norm": 0.73, "acc_norm_stderr": 0.044619604333847394 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.8121212121212121, "acc_stderr": 0.03050193405942914, "acc_norm": 0.8121212121212121, "acc_norm_stderr": 0.03050193405942914 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.8737373737373737, "acc_stderr": 0.023664359402880215, "acc_norm": 0.8737373737373737, "acc_norm_stderr": 0.023664359402880215 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.917098445595855, "acc_stderr": 0.01989934131572178, "acc_norm": 0.917098445595855, "acc_norm_stderr": 0.01989934131572178 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.7410256410256411, "acc_stderr": 0.022211106810061665, "acc_norm": 0.7410256410256411, "acc_norm_stderr": 0.022211106810061665 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.337037037037037, "acc_stderr": 0.028820884666253255, "acc_norm": 0.337037037037037, "acc_norm_stderr": 0.028820884666253255 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.8151260504201681, "acc_stderr": 0.025215992877954202, "acc_norm": 0.8151260504201681, "acc_norm_stderr": 0.025215992877954202 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.41721854304635764, "acc_stderr": 0.040261414976346104, "acc_norm": 0.41721854304635764, "acc_norm_stderr": 0.040261414976346104 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.8642201834862385, "acc_stderr": 0.01468690755634002, "acc_norm": 0.8642201834862385, "acc_norm_stderr": 0.01468690755634002 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.5601851851851852, "acc_stderr": 0.0338517797604481, "acc_norm": 0.5601851851851852, "acc_norm_stderr": 0.0338517797604481 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.8627450980392157, "acc_stderr": 0.024152225962801588, "acc_norm": 0.8627450980392157, "acc_norm_stderr": 0.024152225962801588 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.8776371308016878, "acc_stderr": 0.021331741829746793, "acc_norm": 0.8776371308016878, "acc_norm_stderr": 0.021331741829746793 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.7668161434977578, "acc_stderr": 0.028380391147094706, "acc_norm": 0.7668161434977578, "acc_norm_stderr": 0.028380391147094706 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.8625954198473282, "acc_stderr": 0.030194823996804475, "acc_norm": 0.8625954198473282, "acc_norm_stderr": 0.030194823996804475 }, "harness|hendrycksTest-international_law|5": { "acc": 0.8347107438016529, "acc_stderr": 0.03390780612972776, "acc_norm": 0.8347107438016529, "acc_norm_stderr": 0.03390780612972776 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.8425925925925926, "acc_stderr": 0.035207039905179635, "acc_norm": 0.8425925925925926, "acc_norm_stderr": 0.035207039905179635 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.8159509202453987, "acc_stderr": 0.03044677768797174, "acc_norm": 0.8159509202453987, "acc_norm_stderr": 0.03044677768797174 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.6071428571428571, "acc_stderr": 0.046355501356099754, "acc_norm": 0.6071428571428571, "acc_norm_stderr": 0.046355501356099754 }, "harness|hendrycksTest-management|5": { "acc": 0.8252427184466019, "acc_stderr": 0.0376017800602662, "acc_norm": 0.8252427184466019, "acc_norm_stderr": 0.0376017800602662 }, "harness|hendrycksTest-marketing|5": { "acc": 0.8974358974358975, "acc_stderr": 0.019875655027867457, "acc_norm": 0.8974358974358975, "acc_norm_stderr": 0.019875655027867457 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.8, "acc_stderr": 0.04020151261036846, "acc_norm": 0.8, "acc_norm_stderr": 0.04020151261036846 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.8876117496807152, "acc_stderr": 0.01129454135121655, "acc_norm": 0.8876117496807152, "acc_norm_stderr": 0.01129454135121655 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.7658959537572254, "acc_stderr": 0.022797110278071128, "acc_norm": 0.7658959537572254, "acc_norm_stderr": 0.022797110278071128 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.5027932960893855, "acc_stderr": 0.01672224059549172, "acc_norm": 0.5027932960893855, "acc_norm_stderr": 0.01672224059549172 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.761437908496732, "acc_stderr": 0.024404394928087873, "acc_norm": 0.761437908496732, "acc_norm_stderr": 0.024404394928087873 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.7491961414790996, "acc_stderr": 0.024619771956697168, "acc_norm": 0.7491961414790996, "acc_norm_stderr": 0.024619771956697168 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.8024691358024691, "acc_stderr": 0.02215288992789896, "acc_norm": 0.8024691358024691, "acc_norm_stderr": 0.02215288992789896 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.5531914893617021, "acc_stderr": 0.02965823509766691, "acc_norm": 0.5531914893617021, "acc_norm_stderr": 0.02965823509766691 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.5338983050847458, "acc_stderr": 0.012740853872949834, "acc_norm": 0.5338983050847458, "acc_norm_stderr": 0.012740853872949834 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.6764705882352942, "acc_stderr": 0.02841820861940675, "acc_norm": 0.6764705882352942, "acc_norm_stderr": 0.02841820861940675 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.75, "acc_stderr": 0.01751781884501444, "acc_norm": 0.75, "acc_norm_stderr": 0.01751781884501444 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.7181818181818181, "acc_stderr": 0.043091187099464585, "acc_norm": 0.7181818181818181, "acc_norm_stderr": 0.043091187099464585 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.763265306122449, "acc_stderr": 0.02721283588407315, "acc_norm": 0.763265306122449, "acc_norm_stderr": 0.02721283588407315 }, "harness|hendrycksTest-sociology|5": { "acc": 0.8706467661691543, "acc_stderr": 0.023729830881018512, "acc_norm": 0.8706467661691543, "acc_norm_stderr": 0.023729830881018512 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.87, "acc_stderr": 0.03379976689896309, "acc_norm": 0.87, "acc_norm_stderr": 0.03379976689896309 }, "harness|hendrycksTest-virology|5": { "acc": 0.572289156626506, "acc_stderr": 0.038515976837185335, "acc_norm": 0.572289156626506, "acc_norm_stderr": 0.038515976837185335 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.8421052631578947, "acc_stderr": 0.027966785859160886, "acc_norm": 0.8421052631578947, "acc_norm_stderr": 0.027966785859160886 }, "harness|truthfulqa:mc|0": { "mc1": 0.4565483476132191, "mc1_stderr": 0.01743728095318369, "mc2": 0.6222123218100973, "mc2_stderr": 0.015308802289329178 }, "harness|winogrande|5": { "acc": 0.7963693764798737, "acc_stderr": 0.011317798781626915 }, "harness|gsm8k|5": { "acc": 0.5329795299469295, "acc_stderr": 0.013742492794163425 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
open-llm-leaderboard/details_AA051610__A12P
[ "region:us" ]
2023-12-12T07:00:27+00:00
{"pretty_name": "Evaluation run of AA051610/A12P", "dataset_summary": "Dataset automatically created during the evaluation run of model [AA051610/A12P](https://huggingface.co/AA051610/A12P) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_AA051610__A12P\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-12T06:57:37.769631](https://huggingface.co/datasets/open-llm-leaderboard/details_AA051610__A12P/blob/main/results_2023-12-12T06-57-37.769631.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.6960316046766869,\n \"acc_stderr\": 0.030603322312658773,\n \"acc_norm\": 0.7008390046754603,\n \"acc_norm_stderr\": 0.031195836387238177,\n \"mc1\": 0.4565483476132191,\n \"mc1_stderr\": 0.01743728095318369,\n \"mc2\": 0.6222123218100973,\n \"mc2_stderr\": 0.015308802289329178\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.6254266211604096,\n \"acc_stderr\": 0.014144193471893454,\n \"acc_norm\": 0.64419795221843,\n \"acc_norm_stderr\": 0.01399057113791876\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6210914160525791,\n \"acc_stderr\": 0.004841238763529372,\n \"acc_norm\": 0.8232423819956184,\n \"acc_norm_stderr\": 0.0038068384481617415\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.32,\n \"acc_stderr\": 0.046882617226215034,\n \"acc_norm\": 0.32,\n \"acc_norm_stderr\": 0.046882617226215034\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.6074074074074074,\n \"acc_stderr\": 0.04218506215368879,\n \"acc_norm\": 0.6074074074074074,\n \"acc_norm_stderr\": 0.04218506215368879\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.8355263157894737,\n \"acc_stderr\": 0.030167533468632723,\n \"acc_norm\": 0.8355263157894737,\n \"acc_norm_stderr\": 0.030167533468632723\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.77,\n \"acc_stderr\": 0.04229525846816505,\n \"acc_norm\": 0.77,\n \"acc_norm_stderr\": 0.04229525846816505\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.7320754716981132,\n \"acc_stderr\": 0.027257260322494845,\n \"acc_norm\": 0.7320754716981132,\n \"acc_norm_stderr\": 0.027257260322494845\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.7847222222222222,\n \"acc_stderr\": 0.034370793441061344,\n \"acc_norm\": 0.7847222222222222,\n \"acc_norm_stderr\": 0.034370793441061344\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.44,\n \"acc_stderr\": 0.049888765156985884,\n \"acc_norm\": 0.44,\n \"acc_norm_stderr\": 0.049888765156985884\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.59,\n \"acc_stderr\": 0.04943110704237101,\n \"acc_norm\": 0.59,\n \"acc_norm_stderr\": 0.04943110704237101\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.39,\n \"acc_stderr\": 0.04902071300001974,\n \"acc_norm\": 0.39,\n \"acc_norm_stderr\": 0.04902071300001974\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.6242774566473989,\n \"acc_stderr\": 0.036928207672648664,\n \"acc_norm\": 0.6242774566473989,\n \"acc_norm_stderr\": 0.036928207672648664\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.45098039215686275,\n \"acc_stderr\": 0.04951218252396264,\n \"acc_norm\": 0.45098039215686275,\n \"acc_norm_stderr\": 0.04951218252396264\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.81,\n \"acc_stderr\": 0.039427724440366234,\n \"acc_norm\": 0.81,\n \"acc_norm_stderr\": 0.039427724440366234\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.7319148936170212,\n \"acc_stderr\": 0.028957342788342343,\n \"acc_norm\": 0.7319148936170212,\n \"acc_norm_stderr\": 0.028957342788342343\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.5,\n \"acc_stderr\": 0.047036043419179864,\n \"acc_norm\": 0.5,\n \"acc_norm_stderr\": 0.047036043419179864\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.7103448275862069,\n \"acc_stderr\": 0.03780019230438015,\n \"acc_norm\": 0.7103448275862069,\n \"acc_norm_stderr\": 0.03780019230438015\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.5714285714285714,\n \"acc_stderr\": 0.025487187147859375,\n \"acc_norm\": 0.5714285714285714,\n \"acc_norm_stderr\": 0.025487187147859375\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.46825396825396826,\n \"acc_stderr\": 0.04463112720677171,\n \"acc_norm\": 0.46825396825396826,\n \"acc_norm_stderr\": 0.04463112720677171\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.46,\n \"acc_stderr\": 0.05009082659620332,\n \"acc_norm\": 0.46,\n \"acc_norm_stderr\": 0.05009082659620332\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.8483870967741935,\n \"acc_stderr\": 0.02040261665441676,\n \"acc_norm\": 0.8483870967741935,\n \"acc_norm_stderr\": 0.02040261665441676\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.5566502463054187,\n \"acc_stderr\": 0.03495334582162934,\n \"acc_norm\": 0.5566502463054187,\n \"acc_norm_stderr\": 0.03495334582162934\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.73,\n \"acc_stderr\": 0.044619604333847394,\n \"acc_norm\": 0.73,\n \"acc_norm_stderr\": 0.044619604333847394\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.8121212121212121,\n \"acc_stderr\": 0.03050193405942914,\n \"acc_norm\": 0.8121212121212121,\n \"acc_norm_stderr\": 0.03050193405942914\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.8737373737373737,\n \"acc_stderr\": 0.023664359402880215,\n \"acc_norm\": 0.8737373737373737,\n \"acc_norm_stderr\": 0.023664359402880215\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.917098445595855,\n \"acc_stderr\": 0.01989934131572178,\n \"acc_norm\": 0.917098445595855,\n \"acc_norm_stderr\": 0.01989934131572178\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.7410256410256411,\n \"acc_stderr\": 0.022211106810061665,\n \"acc_norm\": 0.7410256410256411,\n \"acc_norm_stderr\": 0.022211106810061665\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.337037037037037,\n \"acc_stderr\": 0.028820884666253255,\n \"acc_norm\": 0.337037037037037,\n \"acc_norm_stderr\": 0.028820884666253255\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.8151260504201681,\n \"acc_stderr\": 0.025215992877954202,\n \"acc_norm\": 0.8151260504201681,\n \"acc_norm_stderr\": 0.025215992877954202\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.41721854304635764,\n \"acc_stderr\": 0.040261414976346104,\n \"acc_norm\": 0.41721854304635764,\n \"acc_norm_stderr\": 0.040261414976346104\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.8642201834862385,\n \"acc_stderr\": 0.01468690755634002,\n \"acc_norm\": 0.8642201834862385,\n \"acc_norm_stderr\": 0.01468690755634002\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.5601851851851852,\n \"acc_stderr\": 0.0338517797604481,\n \"acc_norm\": 0.5601851851851852,\n \"acc_norm_stderr\": 0.0338517797604481\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.8627450980392157,\n \"acc_stderr\": 0.024152225962801588,\n \"acc_norm\": 0.8627450980392157,\n \"acc_norm_stderr\": 0.024152225962801588\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.8776371308016878,\n \"acc_stderr\": 0.021331741829746793,\n \"acc_norm\": 0.8776371308016878,\n \"acc_norm_stderr\": 0.021331741829746793\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.7668161434977578,\n \"acc_stderr\": 0.028380391147094706,\n \"acc_norm\": 0.7668161434977578,\n \"acc_norm_stderr\": 0.028380391147094706\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.8625954198473282,\n \"acc_stderr\": 0.030194823996804475,\n \"acc_norm\": 0.8625954198473282,\n \"acc_norm_stderr\": 0.030194823996804475\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.8347107438016529,\n \"acc_stderr\": 0.03390780612972776,\n \"acc_norm\": 0.8347107438016529,\n \"acc_norm_stderr\": 0.03390780612972776\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.8425925925925926,\n \"acc_stderr\": 0.035207039905179635,\n \"acc_norm\": 0.8425925925925926,\n \"acc_norm_stderr\": 0.035207039905179635\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.8159509202453987,\n \"acc_stderr\": 0.03044677768797174,\n \"acc_norm\": 0.8159509202453987,\n \"acc_norm_stderr\": 0.03044677768797174\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.6071428571428571,\n \"acc_stderr\": 0.046355501356099754,\n \"acc_norm\": 0.6071428571428571,\n \"acc_norm_stderr\": 0.046355501356099754\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.8252427184466019,\n \"acc_stderr\": 0.0376017800602662,\n \"acc_norm\": 0.8252427184466019,\n \"acc_norm_stderr\": 0.0376017800602662\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.8974358974358975,\n \"acc_stderr\": 0.019875655027867457,\n \"acc_norm\": 0.8974358974358975,\n \"acc_norm_stderr\": 0.019875655027867457\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.8,\n \"acc_stderr\": 0.04020151261036846,\n \"acc_norm\": 0.8,\n \"acc_norm_stderr\": 0.04020151261036846\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.8876117496807152,\n \"acc_stderr\": 0.01129454135121655,\n \"acc_norm\": 0.8876117496807152,\n \"acc_norm_stderr\": 0.01129454135121655\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.7658959537572254,\n \"acc_stderr\": 0.022797110278071128,\n \"acc_norm\": 0.7658959537572254,\n \"acc_norm_stderr\": 0.022797110278071128\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.5027932960893855,\n \"acc_stderr\": 0.01672224059549172,\n \"acc_norm\": 0.5027932960893855,\n \"acc_norm_stderr\": 0.01672224059549172\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.761437908496732,\n \"acc_stderr\": 0.024404394928087873,\n \"acc_norm\": 0.761437908496732,\n \"acc_norm_stderr\": 0.024404394928087873\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.7491961414790996,\n \"acc_stderr\": 0.024619771956697168,\n \"acc_norm\": 0.7491961414790996,\n \"acc_norm_stderr\": 0.024619771956697168\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.8024691358024691,\n \"acc_stderr\": 0.02215288992789896,\n \"acc_norm\": 0.8024691358024691,\n \"acc_norm_stderr\": 0.02215288992789896\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.5531914893617021,\n \"acc_stderr\": 0.02965823509766691,\n \"acc_norm\": 0.5531914893617021,\n \"acc_norm_stderr\": 0.02965823509766691\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.5338983050847458,\n \"acc_stderr\": 0.012740853872949834,\n \"acc_norm\": 0.5338983050847458,\n \"acc_norm_stderr\": 0.012740853872949834\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.6764705882352942,\n \"acc_stderr\": 0.02841820861940675,\n \"acc_norm\": 0.6764705882352942,\n \"acc_norm_stderr\": 0.02841820861940675\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.75,\n \"acc_stderr\": 0.01751781884501444,\n \"acc_norm\": 0.75,\n \"acc_norm_stderr\": 0.01751781884501444\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.7181818181818181,\n \"acc_stderr\": 0.043091187099464585,\n \"acc_norm\": 0.7181818181818181,\n \"acc_norm_stderr\": 0.043091187099464585\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.763265306122449,\n \"acc_stderr\": 0.02721283588407315,\n \"acc_norm\": 0.763265306122449,\n \"acc_norm_stderr\": 0.02721283588407315\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.8706467661691543,\n \"acc_stderr\": 0.023729830881018512,\n \"acc_norm\": 0.8706467661691543,\n \"acc_norm_stderr\": 0.023729830881018512\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.87,\n \"acc_stderr\": 0.03379976689896309,\n \"acc_norm\": 0.87,\n \"acc_norm_stderr\": 0.03379976689896309\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.572289156626506,\n \"acc_stderr\": 0.038515976837185335,\n \"acc_norm\": 0.572289156626506,\n \"acc_norm_stderr\": 0.038515976837185335\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.8421052631578947,\n \"acc_stderr\": 0.027966785859160886,\n \"acc_norm\": 0.8421052631578947,\n \"acc_norm_stderr\": 0.027966785859160886\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.4565483476132191,\n \"mc1_stderr\": 0.01743728095318369,\n \"mc2\": 0.6222123218100973,\n \"mc2_stderr\": 0.015308802289329178\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7963693764798737,\n \"acc_stderr\": 0.011317798781626915\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.5329795299469295,\n \"acc_stderr\": 0.013742492794163425\n }\n}\n```", "repo_url": "https://huggingface.co/AA051610/A12P", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|arc:challenge|25_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|gsm8k|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hellaswag|10_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T06-57-37.769631.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["**/details_harness|winogrande|5_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-12T06-57-37.769631.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_12T06_57_37.769631", "path": ["results_2023-12-12T06-57-37.769631.parquet"]}, {"split": "latest", "path": ["results_2023-12-12T06-57-37.769631.parquet"]}]}]}
2023-12-12T07:01:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of AA051610/A12P Dataset automatically created during the evaluation run of model AA051610/A12P on the Open LLM Leaderboard. The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-12T06:57:37.769631(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Evaluation run of AA051610/A12P\n\n\n\nDataset automatically created during the evaluation run of model AA051610/A12P on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T06:57:37.769631(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of AA051610/A12P\n\n\n\nDataset automatically created during the evaluation run of model AA051610/A12P on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T06:57:37.769631(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 177, 67, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of AA051610/A12P\n\n\n\nDataset automatically created during the evaluation run of model AA051610/A12P on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-12T06:57:37.769631(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
b68e196efae75f3f94b83ea110263b53d7edf89a
# Dataset Card for "gsm8k-processed" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Tianduo/gsm8k-split
[ "region:us" ]
2023-12-12T07:01:51+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "dev", "path": "data/dev-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "ans", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 3607636, "num_examples": 6705}, {"name": "dev", "num_bytes": 415350, "num_examples": 768}, {"name": "test", "num_bytes": 724284, "num_examples": 1319}], "download_size": 2749891, "dataset_size": 4747270}}
2023-12-28T04:00:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "gsm8k-processed" More Information needed
[ "# Dataset Card for \"gsm8k-processed\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"gsm8k-processed\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"gsm8k-processed\"\n\nMore Information needed" ]
9bb5dbb508df39d921c003fcf421f45f1a8886e9
## 小样本实体识别 收集实体识别的数据集, 将其整理成 prompt-response 的形式. 基于语言模型的实体识别. 该数据集可用于: 1. 指令语言模型训练. 2. 数据集创建. (特定领域有少量标注数据时, 可与此数据集一起训练模型, 然后生成样本用于数据标注). 在 prompt 生成过程中会加入一些 `示例`, 我们尽量使各实体的标签满足 `n_way, n_shot`. ### 样本示例 目前有三种实体标注的格式: * (1)句子重写. 比如 `"今天天气怎样"` 改写为 `"<date>今天</date>天气怎么"`. 这种方式的好处是能够从结果推断出实体的具体位置. * (2)json格式. 比如 `"今天天气怎样"` 输出 `{"entity_text": "今天", "entity_type": "date"}`. 这种方式输出的 json 字符串可能会有重复的. 因为句子中可能会有两个一样的实体. * (3)实体类型 - 实体原始字符串. 比如 `"今天天气怎样"` 输出 `date: 今天.`. 这种方式输出的实体, 每行文本以 `实体类型` 开始, 冒号 `:` 后面接实体原始字符串 `今天`. 如果 `实体原始字符串` 中有重复的, 目前会去重复. <details> <summary>cmeee_prompt</summary> <pre><code>实体识别.<br> 请你对句子进行重写并标注出其中包含的实体.<br> 实体有: - microbe - drug - procedure - item - disease<br> 示例: ------ text: (二)感染因素病原微生物能否引起肠道感染,取决于宿主防御功能的强弱、感染量的大小以及微生物毒力(黏附性、产毒性及侵袭性)、细胞毒性,其中微生物的黏附能力对于肠道感染至关重要。 rewrited: (二)&lt;disease&gt;感染&lt;/disease&gt;因素&lt;microbe&gt;病原微生物&lt;/microbe&gt;能否引起&lt;disease&gt;肠道感染&lt;/disease&gt;,取决于宿主&lt;item&gt;防御功能&lt;/item&gt;的强弱、&lt;item&gt;感染量&lt;/item&gt;的大小以及&lt;microbe&gt;微生物&lt;/microbe&gt;毒力(黏附性、产毒性及侵袭性)、细胞毒性,其中&lt;microbe&gt;微生物&lt;/microbe&gt;的黏附能力对于&lt;disease&gt;肠道感染&lt;/disease&gt;至关重要。 ------ text: (三)幽门螺杆菌阴性消化性溃疡的传统治疗在下述药物中,以H2&lt;/sub&gt;受体阻滞剂应用最多,其机制为抑制组胺对壁细胞的泌酸作用,但对于胆碱能神经或胃泌素合并的餐后胃酸分泌影响较小。 rewrited: (三)&lt;disease&gt;幽门螺杆菌阴性消化性溃疡&lt;/disease&gt;的&lt;procedure&gt;传统治疗&lt;/procedure&gt;在下述药物中,以&lt;drug&gt;H2&lt;/sub&gt;受体阻滞剂&lt;/drug&gt;应用最多,其机制为抑制组胺对壁细胞的泌酸作用,但对于胆碱能神经或胃泌素合并的餐后胃酸分泌影响较小。 ------ text: 用本法显影受肾功能的影响很小,有人估计,只要残留肾功能在3%以上,本法即可勉强显影。 rewrited: </code></pre> </details> <details> <summary>cmeee_prompt</summary> <pre><code>请对句子进行重写以标注出其中的 “药剂”,“科室”,“发病症状” 实体。<br> 示例: ``` text:代谢性碱中毒的发生,可能与排钠钾性利尿剂的持续使用有关,造成肾小管对钠钾的重吸收障碍,同时利尿剂抑制了对氯离子的重吸收,则发生低氯性代谢性碱中毒。 rewrited:代谢性碱中毒的发生,可能与&lt;药剂&gt;排钠钾性利尿剂&lt;/药剂&gt;的持续使用有关,造成肾小管对钠钾的重吸收障碍,同时&lt;药剂&gt;利尿剂&lt;/药剂&gt;抑制了对氯离子的重吸收,则发生低氯性代谢性碱中毒。 ``` text:参考文献1.胡亚美,江载芳.诸福棠实用儿科学.第7版.北京:人民卫生出版社,20022.江载芳.实用小儿呼吸病学.北京:人民卫生出版社,2010:2253.中华医学会儿科分会呼吸学组.儿童侵袭性肺部真菌感染诊治指南(儿童侵袭性真菌感染州医学.2009,22(3):183-1834.肖晶,周东风,孟浦.儿童侵袭性真菌感染早期诊断的研究进展.国际儿科学杂志,2009,36(5):523-5235.KlontRR,MenninkKerstenMH,RuegebrinkW,etal.ParadoxicalincreaseincirculatingHspergillusantigenduringtreatmentwithcaspofungininapatientwithpulmonaryaspergillosis.ClinInfectDis,2006,43(3):23-236.AgarwalR.Allergicbronchopulmonaryaspergillosis.Chest,2009,135:805-8267.ChabraSK,SahayandS,RamarajuK.Allergicbronchopulmonaryaspergillosiscomplicatingchildhoodasthma.IndianJPediatric,2009,76(3):331-331 rewrited:参考文献1.胡亚美,江载芳.诸福棠实用&lt;科室&gt;儿科&lt;/科室&gt;学.第7版.北京:人民卫生出版社,20022.江载芳.实用小儿呼吸病学.北京:人民卫生出版社,2010:2253.中华医学会&lt;科室&gt;儿科&lt;/科室&gt;分会呼吸学组.儿童侵袭性肺部真菌感染诊治指南(儿童侵袭性真菌感染州医学.2009,22(3):183-1834.肖晶,周东风,孟浦.儿童侵袭性真菌感染早期诊断的研究进展.国际&lt;科室&gt;儿科&lt;/科室&gt;学杂志,2009,36(5):523-5235.KlontRR,MenninkKerstenMH,RuegebrinkW,etal.ParadoxicalincreaseincirculatingHspergillusantigenduringtreatmentwithcaspofungininapatientwithpulmonaryaspergillosis.ClinInfectDis,2006,43(3):23-236.AgarwalR.Allergicbronchopulmonaryaspergillosis.Chest,2009,135:805-8267.ChabraSK,SahayandS,RamarajuK.Allergicbronchopulmonaryaspergillosiscomplicatingchildhoodasthma.IndianJPediatric,2009,76(3):331-331 ``` text:间接法则是在进行肾肾动态显像很少时,令受检者尽量憋住尿,然后用力排尿,在此过程中用γ相机快速照相,也可观察有无膀胱输尿管反流的存在和程度。 rewrited: </code></pre> </details> <details> <summary>ccks2019_task1_prompt</summary> <pre><code>识别医学方面的实体,如:实验室检验,药物,影像检查,疾病和诊断,解剖部位,手术 等。<br> 示例: ------ raw_text:入院前14年患者检查发现血糖升高,血糖具体值不详,有口干、多饮、多尿等症状,伴四肢麻木,并有感觉异常,自觉肢体冰凉,伴视物模糊,于外院诊断为“2型糖尿病 糖尿病周围神经病变 糖尿病眼病”,后长期服用“格列美脲、伏格列波糖、罗格列酮”等降血糖,半年前于我科住院,出院后使用“来得时+阿卡波糖”降糖,但院外患者自行加用“格列美脲”降糖,偶有心慌冒汗症状。1+年前患者因活动后心累心悸,偶有干咳,感肢体麻木冰凉较明显,伴有腰部及上肢疼痛,自觉健忘症状加重,于我院内一科住院,查肾功:尿素 16.00 mmol/L、肌酐 141.7 μmol/l,诊断为“糖尿病肾病、原发性高血压3级极高危 高血压性心脏病”,治疗后好转出院;半年前因腹泻于我科住院,检查肾功:尿素 19.79 mmol/L、肌酐 225.2 μmol/l及其他检查,诊断“1.2型糖尿病 糖尿病周围神经病变 糖尿病眼病 糖尿病肾病 糖尿病植物神经病变 2.原发性高血压3级 很高危 高血压性心脏病 3.重度骨质疏松 4、甲状腺功能减退 5、冠状动脉粥样硬化心脏病”,治疗后好转出院,出院后未定期随访复查,并自行停用相关药物。3+月前患者感活动及爬坡后心累、气促明显,稍休息后能好转,并出现头昏痛,无视物旋转,无耳鸣,无发热,无呕吐,无黑便等,在院外自行服药(具体药名不详),效果欠佳,今为求治疗,来我科要求住院治疗。以“2型糖尿病、糖尿病肾病”收入院。患者此次发病以来精神食欲一般,长期睡眠差,经常服用“安眠药”,大便正常,诉长期解小便色偏黄,尿量不详,体重无明显上降。 outputs: 疾病和诊断:糖尿病植物神经病变;原发性高血压3级 很高危;糖尿病肾病;高血压性心脏病;糖尿病眼病;冠状动脉粥样硬化心脏病;血糖升高;重度骨质疏松;糖尿病周围神经病变;腹泻;2型糖尿病;甲状腺功能减退;原发性高血压3级极高危。 解剖部位:腰部;肢体;上肢;四肢;耳;心;头。 药物:阿卡波糖;罗格列酮;伏格列波糖;来得时;格列美脲。 实验室检验:尿素;肌酐。 ------ raw_text:,患者2年余前(2009.11)因“腹泻”就诊********查肠镜提示升结肠癌,行升结肠癌根治术,手术及术后病理不详,术后行9个周期FOLFOX化疗,末次化疗2010.4结束。化疗后患者出现II度的神经毒性,主要表现为手足麻木明显。患者2011.1.6复查CT发现肝脏转移瘤,2011.1.12行肝转移瘤微波固化术,2011.2.22CT提示肝转移,两上肺转移瘤,2011.2.23再次行肝转移瘤微波固化术。2011.3.1起行FOLFIRI方案化疗8次,末次2011.7.28,总体疗效SD。此后患者停止化疗。2011-11-24,我院CT示:“,与2011-10-21片对比:两肺多发转移瘤较前增大;肝内多发病灶,较前明显增多、增大。腹腔干旁及肠系膜根部软组织影及结节灶,考虑淋巴结转移可能性大,较前明显。子宫直肠窝内多发结节,考虑种植转移可能性大。膀胱后壁结节状增厚,考虑转移瘤可能性大。右侧髂内外血管旁淋巴结,可疑转移。”于2011-11-25、12-9、12-27、2012-1-12行CPT-11+C225化疗4程。近来,患者自觉左中上腹疼痛,胃纳食欲不佳,大便干,血尿,伴尿频、尿痛,偶有尿失禁,近2天乏力明显,体重近期上降2KG。 outputs: 解剖部位:右侧髂内外血管旁淋巴结;腹;肠;手;胃;腹腔干旁;足;左中上腹;肝;子宫直肠窝;膀胱。 疾病和诊断:肝脏转移瘤;升结肠癌;两肺多发转移瘤;肝转移,两上肺转移瘤。 手术:肝转移瘤微波固化术;升结肠癌根治术。 影像检查:CT。 药物:CPT-11;C225。 ------<br> 注意: 1. 每个实体类型占一行。 2. 实体类型起头,实体原始文本用 ";" 隔开。例如:entity_name:entity_text1;entity_text2。<br> text: ------ ,患者4个月前因“便秘2月余”入住我院消化内科,行胸、腹、,盆腔CT示:两肺上叶纤维灶,肝多发囊肿、肾囊肿,直肠壁增厚、符合直肠CA表现。为行手术治疗,转我科。完善相关术前检查后,于2016-02-15行直肠癌根治术,,术后病理:直肠腺癌(中低度分化),部分呈粘液腺癌图像,浸润溃疡型,体积6.5*5*0.6CM。 侵达被膜。 双端切线及另送“直肠近切线”、“直肠远切线”未查见癌。 呈肠壁一站(2/11个)淋巴结癌转移。 符合炎性增生性息肉。 ,免疫组化染色示:TS部分(+)、SYN(-)。术后给予补液,换药及对症支持治疗并口服希罗达行化学治疗。 院外期间患者一般情况可,无发热,无腹痛腹胀胀不适,现患者为行复查及本周期化疗再次来我院就诊,门诊以“直肠术后”收住入院。 患者自下次出院来,神志清,精神可,饮食尚可,大小便正常,体重无明显变化。 ------ output: </code></pre> </details> <details> <summary>ccks2019_task1_prompt</summary> <pre><code>实体识别。 需要识别的实体类型有: “手术”,“解剖部位”。<br> 注意: - 识别到的实体必须以 json 格式输出。其中包括 key: text: 实体原始文本。label: 实体类型标签。 - 注意一定要生成合法的 json 字符串。 - 示例中的实体标注的可能不是很精确。<br> 示例: ``` text:缘于1+月前患者因“CINIII级”在我院行“LEEP”术,术后病理报告(2014.10.27):(宫颈组织)宫颈粘膜慢性炎伴纳氏囊肿、糜烂,鳞状下皮CINIII级累及腺体,并伴局部区域微小浸润,宫颈管切端未见病变累及。建议行“残余子宫切除术”。遂今就诊我院,要求住院手术治疗,无阴道出血、异常排液,无发热、腹痛、腹胀、尿频、尿急等不适,故门诊拟“CINIII级”收入院。发病以来精神、睡眠、食欲尚可,大便如下述,小便正常,体重有明显减轻。 outputs: {"text": "“LEEP”术", "label": "手术"} {"text": "宫颈管", "label": "解剖部位"} {"text": "残余子宫切除术", "label": "手术"} {"text": "阴道", "label": "解剖部位"} {"text": "腹", "label": "解剖部位"} {"text": "腹", "label": "解剖部位"} ``` text:,患者2015-06因“无明显诱因出现大便带鲜血,量少,伴大便次数增加至2-3次/天5月”,外院肠镜检查提示乙状结肠距肛门18-20CM肿物,大小2×1CM,肠镜能通过,活检病理提示腺癌,大肠多发息肉。转诊我院2015-06-20在全麻上行DIXON术,,术后病理示:(乙状结肠大体)镜检为结肠中分化腺癌,浸润至肠壁浆膜上层,癌组织侵犯神经束,脉管内见癌栓;经多取材另见淋巴结2枚,未见癌。2(近切缘)未见癌。3(远切缘)未见癌。4(中央组淋巴结)1枚,未见癌。5(中间组淋巴结)2枚,1/2见腺癌转移。6(肠旁淋巴结)2枚,未见癌。,免疫组化:ER(-),ERCC1(+),MLH1(+),MSH2(+),MSH6(+),PMS2(+),KI67(70%+),分期PT3N1AM0,IIIB期。术顺,术后切口愈合佳。于2015-07-18、2015-08-07、2015-08-28行XELOX方案辅助化疗3程,过程顺利。3程化疗后于2015-09-17行胸片及彩超复查未见肿瘤复发转移征象。现患者为行上程化疗入院。近3周来,患者精神睡眠可,胃纳可,二便正常,体重无明显上降。 outputs: </code></pre> </details> <details> <summary>cluener2020_prompt</summary> <pre><code>实体识别。 以新浪新闻进行细粒度命名实体标注,实体类型包括:government,organization 等。<br> 小样本示例: ``` raw_text:3月17日,发现客户信用卡被人冒名补办的银行方,向督院街派出所报了案。案侦民警立即开始调查取证, outputs: government:向督院街派出所。 ``` raw_text:坐在方向盘前的正是有24年驾龄的公交司机冉涌。据万州公安交巡警支队通报, outputs: government:万州公安交巡警支队。 ``` raw_text:金石开:从往绩来看,阿森纳对桑德兰拥有压倒性的优势,英超以来交手全部获胜。不过,上赛季枪手两战“ outputs: organization:枪手;英超;桑德兰;阿森纳。 ```<br> 请从以下文本中识别实体。<br> 注意: 1. 用实体类型起头,后接实体原始文本,用 ":" 隔开。例如:entity_type:entity_text1;entity_text2;...。 2. 每个实体类型占一行。 3. 不要输出其它的实体类型。<br> text: ``` 随后报警。警方调取银行录像,认出取钱者为周清来,并将其抓获。周清来交代,取走的6. ``` outputs: </code></pre> </details> <details> <summary>cluener2020_prompt</summary> <pre><code>实体识别。 以新浪新闻进行细粒度命名实体标注,实体类型包括:government,organization 等。<br> 小样本示例: ``` raw_text:3月17日,发现客户信用卡被人冒名补办的银行方,向督院街派出所报了案。案侦民警立即开始调查取证, outputs: government:向督院街派出所。 ``` raw_text:坐在方向盘前的正是有24年驾龄的公交司机冉涌。据万州公安交巡警支队通报, outputs: government:万州公安交巡警支队。 ``` raw_text:金石开:从往绩来看,阿森纳对桑德兰拥有压倒性的优势,英超以来交手全部获胜。不过,上赛季枪手两战“ outputs: organization:枪手;英超;桑德兰;阿森纳。 ```<br> 请从以下文本中识别实体。<br> 注意: 1. 用实体类型起头,后接实体原始文本,用 ":" 隔开。例如:entity_type:entity_text1;entity_text2;...。 2. 每个实体类型占一行。 3. 不要输出其它的实体类型。<br> text: ``` 随后报警。警方调取银行录像,认出取钱者为周清来,并将其抓获。周清来交代,取走的6. ``` outputs: </code></pre> </details> <details> <summary>ecommerce_prompt</summary> <pre><code>从句子中识别出 “商品”,“型号” 实体,并以 json 格式输出。<br> 示例: ``` text:蜜蜂产品与保健,书籍,保养保健 outputs: {"entity_text": "书籍", "entity_type": "商品"} ``` text:时尚多款雨天防臭平底高筒雨鞋防水雨靴红蓝紫色单靴蓝色防滑胶鞋 outputs: {"entity_text": "雨鞋", "entity_type": "商品"} {"entity_text": "雨靴", "entity_type": "商品"} {"entity_text": "单靴", "entity_type": "商品"} {"entity_text": "胶鞋", "entity_type": "商品"} ``` text:足量,qd,mmc,plus卡,512m,内存卡,相机/老款手机,mmc,512m一体卡 outputs: {"entity_text": "qd", "entity_type": "型号"} {"entity_text": "mmc", "entity_type": "型号"} {"entity_text": "plus", "entity_type": "型号"} {"entity_text": "卡", "entity_type": "商品"} {"entity_text": "内存卡", "entity_type": "商品"} {"entity_text": "相机", "entity_type": "商品"} {"entity_text": "手机", "entity_type": "商品"} {"entity_text": "mmc", "entity_type": "型号"} {"entity_text": "512m", "entity_type": "型号"} {"entity_text": "一体卡", "entity_type": "商品"} ``` text:儿童披风秋冬新款男童女童斗篷宝宝披肩婴儿装棉呢子加厚外套风衣 outputs: </code></pre> </details> <details> <summary>ecommerce_prompt</summary> <pre><code>实体识别。<br> 请你对句子进行重写并标注出其中包含的实体:产品型号, 品牌, 产品, 其它实体。<br> 示例: ------ text:全新正品2脚放电管75v,2d075gdt,放电管2r075-8,防雷管2脚75v rewrited:全新正品2脚&lt;产品&gt;放电管&lt;/产品&gt;75v,&lt;产品型号&gt;2d075gdt&lt;/产品型号&gt;,&lt;产品&gt;放电管&lt;/产品&gt;&lt;产品型号&gt;2r075-8&lt;/产品型号&gt;,&lt;产品&gt;防雷管&lt;/产品&gt;2脚75v ------ text:标致5008后备箱垫七座专用5008汽车全包围尾箱垫标志5008内饰改装 rewrited:&lt;品牌&gt;标致&lt;/品牌&gt;&lt;产品型号&gt;5008&lt;/产品型号&gt;&lt;产品&gt;后备箱垫&lt;/产品&gt;七座专用&lt;产品型号&gt;5008&lt;/产品型号&gt;&lt;产品&gt;汽车&lt;/产品&gt;全包围&lt;产品&gt;尾箱垫&lt;/产品&gt;&lt;品牌&gt;标志&lt;/品牌&gt;&lt;产品型号&gt;5008&lt;/产品型号&gt;&lt;产品&gt;内饰&lt;/产品&gt;改装 ------ text:桂林特产7度漓泉纯生啤酒330ml&times rewrited:&lt;其它实体&gt;桂林&lt;/其它实体&gt;&lt;产品&gt;特产&lt;/产品&gt;&lt;其它实体&gt;7度&lt;/其它实体&gt;&lt;品牌&gt;漓泉&lt;/品牌&gt;&lt;产品&gt;纯生啤酒&lt;/产品&gt;&lt;其它实体&gt;330ml&lt;/其它实体&gt;&times ------ text:烤面筋 rewrited: &lt;产品&gt;烤面筋&lt;/产品&gt; </code></pre> </details> <details> <summary>nlpcc2018_task4_prompt</summary> <pre><code>从句子中识别出 “Age”,“Destination”,“Singer”,“Style”,“Custom Destination”,“Instrument” 实体。<br> 示例: ``` text:放一首儿歌给我放一首儿歌 outputs: Age:儿歌。 ``` text:我想去中国人民保险 outputs: Destination:中国人民保险。 ``` text:导航去茌平 outputs: Destination:茌平。 ``` text:播放diddy outputs: Singer:diddy。 ``` text:想听齐秦齐豫的心经 outputs: Singer:齐豫;齐秦。 ``` text:播放酒吧摇滚音乐 outputs: Style:摇滚。 ``` text:串烧乐曲 outputs: Style:串烧。 ``` text:我要回家 outputs: Custom Destination:家。 ``` text:我要去公司 outputs: Custom Destination:公司。 ``` text:播放锁那 outputs: Instrument:锁那。 ``` text:萨克斯 outputs: Instrument:萨克斯。 ``` text:播放广东雨神的广东爱情故事 outputs: </code></pre> </details> <details> <summary>nlpcc2018_task4_prompt</summary> <pre><code>虚拟助手槽位提取。<br> 请你对句子进行重写并标注出其中包含的槽值。<br> 需要的槽值有:乐器名称,语言,年代。<br> 示例: ### text:古筝音乐 rewrited:&lt;乐器名称&gt;古筝&lt;/乐器名称&gt;音乐 ### text:播放古筝 rewrited:播放&lt;乐器名称&gt;古筝&lt;/乐器名称&gt; ### text:多放一点dj dj的中文歌曲 rewrited:多放一点dj dj的&lt;语言&gt;中文&lt;/语言&gt;歌曲 ### text:播放藏族歌曲 rewrited:播放&lt;语言&gt;藏族&lt;/语言&gt;歌曲 ### text:播放儿歌蓝皮鼠和大脸猫的主题曲 rewrited:播放&lt;年代&gt;儿歌&lt;/年代&gt;蓝皮鼠和大脸猫的主题曲 ### text:播放儿童歌曲 rewrited:播放&lt;年代&gt;儿童&lt;/年代&gt;歌曲 ### text:播放前小白的关灯与神 rewrited: 播放前小白的关灯与神 </code></pre> </details> ### 数据来源 #### 汉语实体识别 | 数据 | 原始数据/项目地址 | 样本个数 | 实体类型 | 原始数据描述 | 替代数据下载地址 | | :--- | :---: | :---: | :---: | :---: | :---: | | CMeEE | [CBLUE](http://www.cips-chip.org.cn/2021/CBLUE); [天池下载](https://tianchi.aliyun.com/dataset/95414) | 20000 | 儿科疾病, 身体部位, 临床表现, 医疗程序, 等 9 大类医学实体 | 医学实体识别任务 | [nlhappy/CMeEE](https://huggingface.co/datasets/nlhappy/CMeEE) [Rosenberg/CMeEE-V2](https://huggingface.co/datasets/Rosenberg/CMeEE-V2) | | CCKS2019_task1 | [Yidu-S4K](http://openkg.cn/dataset/yidu-s4k) | 1379 | 解剖部位, 手术, 疾病和诊断, 药物, 实验室检验, 影像检查 | CCKS2019面向中文电子病历的命名实体识别数据集 | | | CLUENER2020 | [CLUE](https://www.cluebenchmarks.com/introduce.html); [CLUENER](https://storage.googleapis.com/cluebenchmark/tasks/cluener_public.zip) | 12091 | 游戏, 组织, 政府, 电影, 人名, 书籍, 公司, 场景, 职位, 地址 | CLUENER2020数据集 | | | MSRA | [MSRA](https://www.msra.cn/) | 48442 | 地址, 组织, 人名 | MSRA微软亚洲研究院开源命名实体识别数据集 | [doushabao4766/msra_ner_k_V3_wc_bioes](https://huggingface.co/datasets/doushabao4766/msra_ner_k_V3_wc_bioes); [msra_ner](https://huggingface.co/datasets/msra_ner) | | NLPCC2018_task4 | [NLPCC2018](http://tcci.ccf.org.cn/conference/2018/taskdata.php); [NLPCC2018_task4](http://tcci.ccf.org.cn/conference/2018/dldoc/trainingdata04.zip) | 21352 | 歌手, 歌曲, 主题, 情感, 风格, 目的地, 电话号码, 乐器, 联系人, 年龄, 热门列表, 自定义目的地, 语种, 场景, 出发地 | 任务型对话系统数据数据集 | | | CCFBDCI | [CCFBDCI填写申请表后可下载](https://www.datafountain.cn/competitions/510/datasets) | 15723 | LOC、GPE、ORG和PER | 中文命名实体识别算法鲁棒性评测数据集 | | | MMC | [MMC](https://tianchi.aliyun.com/competition/entrance/231687/information) [MMC数据集](https://aistudio.baidu.com/datasetdetail/146995) | 3498 | 实体类型 | 瑞金医院MMC人工智能辅助构建知识图谱大赛数据集 | | | WeiBo | [WeiBo](https://github.com/hltcoe/golden-horse/tree/master) | 1890 | LOC.NAM、LOC.NOM、PER.NAM、ORG.NOM、ORG.NAM、GPE.NAM和PER.NOM | 社交媒体中文命名实体识别数据集 | | | ECommerce | [ECommerce](https://github.com/allanj/ner_incomplete_annotation/tree/master) | 7998 | MISC、XH、HPPX和HCCX | 面向电商的命名实体识别数据集 | | | YouKu | [YouKu](https://github.com/allanj/ner_incomplete_annotation/tree/master) | | TELEVISION、PER、MISC | 优酷视频。 | | | FinanceSina | [FinanceSina](https://github.com/jiesutd/LatticeLSTM/tree/master) | 1579 | LOC、GPE、ORG和PER | 新浪财经爬取中文命名实体识别数据集 | | | Resume | [Resume](https://github.com/jiesutd/LatticeLSTM/tree/master/ResumeNER) | 4761 | NAME、EDU、LOC、ORG、PRO、TITLE、CONT和RACE | 中国股市上市公司高管的简历 | | | Bank | [Bank](https://www.heywhale.com/mw/dataset/617969ec768f3b0017862990/file) | 10000 | BANK、COMMENTS_ADJ、COMMENTS_N和PRODUCT | 银行借贷数据数据集 | | | DLNER | [DLNER](https://github.com/lancopku/Chinese-Literature-NER-RE-Dataset/tree/master) | 28897 | Location、Thing、Abstract、Organization、Metric、Time、Physical、Person和Term | 语篇级命名实体识别数据集 | | | people_daily | [china-people-daily-ner](https://tianchi.aliyun.com/dataset/128270); [china-people-daily-ner-corpus.tar.gz](http://s3.bmio.net/kashgari/china-people-daily-ner-corpus.tar.gz) | train: 20864; valid: 2318; test: 4636; | LOC; ORG; PER; | 1998人民日报序列标注; People's Daily(人民日报) dataset | [xusenlin/people-daily-ner](https://huggingface.co/datasets/xusenlin/people-daily-ner); [peoples_daily_ner](https://huggingface.co/datasets/peoples_daily_ner); [Chinese-NLP-Corpus](https://github.com/OYE93/Chinese-NLP-Corpus/tree/master/NER/People's%20Daily) | | conll2012_ontonotesv5_chinese_v4 | [ontonotes-conll2012](https://data.mendeley.com/datasets/zmycy7t9h9/2); [ontonotes-v5](https://paperswithcode.com/sota/named-entity-recognition-ner-on-ontonotes-v5) | train: 1.39K, valid: 172; test: 166; | 18类: PERSON, NORP, FAC, ORG, GPE, LOC, PRODUCT, DATE, TIME, PERCENT, MONEY, QUANTITY, ORDINAL, CARDINAL, EVENT, WORK_OF_ART, LAW, LANGUAGE, | OntoNotes v5.0是OntoNotes语料库的最终版本,是一个大规模、多体裁、多语言的人工标注句法、语义和话语信息的语料库。 | [conll2012_ontonotesv5](https://huggingface.co/datasets/conll2012_ontonotesv5); | #### 英语实体识别 | 数据 | 原始数据/项目地址 | 样本个数 | 实体类型 | 原始数据描述 | 替代数据下载地址 | | :--- | :---: | :---: | :---: | :---: | :---: | | limit | [LiMiT: The Literal Motion in Text Dataset](https://aclanthology.org/2020.findings-emnlp.88/) | train: 23559; valid: 1000 | 无实体类型 | motion 识别是许多生命形式的基本认知能力之一,但在自然语言中识别物理实体的 motion 尚未得到广泛和实证的探索。| [limit](https://huggingface.co/datasets/limit) | | conll2003 | [CoNLL-2003](https://aclanthology.org/W03-0419/) | train: 14K; valid: 3.25K; test: 3.45K | PER, ORG, LOC, MISC | CoNLL-2003 | [conll2003](https://huggingface.co/datasets/conll2003); [conllpp](https://huggingface.co/datasets/conllpp) | | ncbi_disease | [NCBI disease corpus](https://www.sciencedirect.com/science/article/pii/S1532046413001974?via%3Dihub) | train: 5.43K; valid: 924; test: 941 | Disease | 该数据集包含 NCBI 疾病语料库的疾病名称和概念注释,该语料库包含 793 篇 PubMed 摘要,在提及和概念级别进行了完整注释,可作为生物医学自然语言处理社区的研究资源。 | [ncbi_disease](https://huggingface.co/datasets/ncbi_disease) | | xglue | [XGLUE](https://arxiv.org/abs/2004.01401); [XGLUE](https://microsoft.github.io/XGLUE/) | | | XGLUE 是一个新的基准数据集,用于评估跨语言预训练模型在跨语言自然语言理解和生成方面的性能。 | [xglue](https://huggingface.co/datasets/xglue) | | plod | [PLOD](https://arxiv.org/abs/2204.12061); [PLOD-AbbreviationDetection](https://github.com/surrey-nlp/PLOD-AbbreviationDetection) | train: 1.06M; test: 118K | long; short; | 从非结构化文本中检测和提取缩写有助于提高自然语言处理任务的性能,例如机器翻译和信息检索。 | [batterydata/abbreviation_detection](https://huggingface.co/datasets/batterydata/abbreviation_detection); [surrey-nlp/PLOD-filtered](https://huggingface.co/datasets/surrey-nlp/PLOD-filtered) | | acronym_identification | [Acronym Identification](https://sites.google.com/view/sdu-aaai21/shared-task); [AAAI-21-SDU-shared-task-1-AI](https://github.com/amirveyseh/AAAI-21-SDU-shared-task-1-AI) | train: 14K; valid: 1.72K; test: 1.75K; | long; short; | SDU@AAAI-21 的首字母缩写词识别任务 | [acronym_identification](https://huggingface.co/datasets/acronym_identification) | | conll2012_ontonotesv5_english_v4 | [ontonotes-conll2012](https://data.mendeley.com/datasets/zmycy7t9h9/2); [ontonotes-v5](https://paperswithcode.com/sota/named-entity-recognition-ner-on-ontonotes-v5) | train: 1.94K, valid: 222; test: 222; | 18类: PERSON, NORP, FAC, ORG, GPE, LOC, PRODUCT, DATE, TIME, PERCENT, MONEY, QUANTITY, ORDINAL, CARDINAL, EVENT, WORK_OF_ART, LAW, LANGUAGE, | OntoNotes v5.0是OntoNotes语料库的最终版本,是一个大规模、多体裁、多语言的人工标注句法、语义和话语信息的语料库。 | [conll2012_ontonotesv5](https://huggingface.co/datasets/conll2012_ontonotesv5); | | conll2012_ontonotesv5_english_v12 | [ontonotes-conll2012](https://data.mendeley.com/datasets/zmycy7t9h9/2); [ontonotes-v5](https://paperswithcode.com/sota/named-entity-recognition-ner-on-ontonotes-v5) | train: 10.5K, valid: 1.37K; test: 1.2K; | 18类: PERSON, NORP, FAC, ORG, GPE, LOC, PRODUCT, DATE, TIME, PERCENT, MONEY, QUANTITY, ORDINAL, CARDINAL, EVENT, WORK_OF_ART, LAW, LANGUAGE, | OntoNotes v5.0是OntoNotes语料库的最终版本,是一个大规模、多体裁、多语言的人工标注句法、语义和话语信息的语料库。 | [conll2012_ontonotesv5](https://huggingface.co/datasets/conll2012_ontonotesv5); | | wnut_17 | [wnut-2017-emerging-and-rare-entity](https://paperswithcode.com/dataset/wnut-2017-emerging-and-rare-entity) | train: 3.39K, valid: 1.01K, test: 1.29K, | corporation, creative-work, group, location, person, product, | WNUT 17:新兴和稀有实体识别 | [wnut_17](https://huggingface.co/datasets/wnut_17); [tner/wnut2017](https://huggingface.co/datasets/tner/wnut2017) | | few_nerd | [Few-NERD](https://paperswithcode.com/dataset/few-nerd); [fewnerd](https://ningding97.github.io/fewnerd/) | train: 132K; valid: 18.8K; test: 37.6K; | (art, building, event, location, organization, other, person, product); (art-broadcastprogram, art-film, ...) | Few-NERD 是一个大规模、细粒度手动注释的命名实体识别数据集,包含 8 个粗粒度类型、66 个细粒度类型、188,200 个句子、491,711 个实体和 4,601,223 个标记。构建了三个基准任务,一个是监督任务(Few-NERD (SUP)),另外两个是少样本任务(Few-NERD (INTRA) 和 Few-NERD (INTER))。 | [DFKI-SLT/few-nerd](https://huggingface.co/datasets/DFKI-SLT/few-nerd) | | BLURB | | | | BLURB 是生物医学自然语言处理资源的集合。 | [EMBO/BLURB](https://huggingface.co/datasets/EMBO/BLURB) | | bc2gm | [bc2gm-corpus](https://github.com/spyysalo/bc2gm-corpus/raw/master/conll/); [Overview of BioCreative II gene mention recognition](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2559986/) | train: 12.5K; valid: 2.5K; test: 5K; | gene | 基因提及检测 | [bc2gm_corpus](https://huggingface.co/datasets/bc2gm_corpus) | | bc4chemd_ner | [BC4CHEMD](https://github.com/cambridgeltl/MTL-Bioinformatics-2016/tree/master/data/BC4CHEMD) | 10K | Chemical | 它是 10,000 条 PubMed 摘要的集合,其中总共包含 84,355 个化学实体提及项,由化学专家文献管理员手动标记,遵循专门为此任务定义的注释指南。 | [drAbreu/bc4chemd_ner](https://huggingface.co/datasets/drAbreu/bc4chemd_ner) | | pet | [pet](https://paperswithcode.com/dataset/pet); [pet-dataset](https://pdi.fbk.eu/pet-dataset/) | 500 | Actor; Activity; Activity Data; Further Specification; XOR Gateway; Condition Specification; AND Gateway; | PET:用于从自然语言文本中提取过程的新数据集 | [patriziobellan/PET](https://huggingface.co/datasets/patriziobellan/PET) | | ipm_nel | [Analysis of Named Entity Recognition and Linking for Tweets](http://www.derczynski.com/papers/ner_single.pdf); [entity-linking-on-derczynski-1](https://paperswithcode.com/sota/entity-linking-on-derczynski-1) | | | 数据集中于十种类型的命名实体:公司、设施、地理位置、电影、音乐艺术家、个人、产品、运动队、电视节目等。 | [strombergnlp/ipm_nel](https://huggingface.co/datasets/strombergnlp/ipm_nel) | | wiesp2022_ner | [WIESP](https://ui.adsabs.harvard.edu/WIESP/) | train: 1.75K; valid: 1.37K; test: 2.51K; | | 包含天体物理学论文文本片段的数据集,由 NASA 天体物理数据系统提供,并手动标记天文设施和其他感兴趣的实体(例如天体)。 | [adsabs/WIESP2022-NER](https://huggingface.co/datasets/adsabs/WIESP2022-NER) | | named_timexes | [Recognising and Interpreting Named Temporal Expressions](https://aclanthology.org/R13-1015.pdf) | | time | 这是一个为命名时间表达式块注释的数据集。 | [named_timexes](https://huggingface.co/datasets/strombergnlp/named_timexes) | | episet4ner_v2 | | train: 4.43K; valid: 1.21K; test: 537; | 实体类型 | EpiSet4NER-v2 是流行病学实体识别的黄金标准数据集 | [ncats/EpiSet4NER-v2](https://huggingface.co/datasets/ncats/EpiSet4NER-v2) | | sd_nlp_non_tokenized | [SourceData](https://sourcedata.embo.org/) | | SMALL_MOLECULE; GENEPROD; SUBCELLULAR; CELL; TISSUE; ORGANISM; DISEASE; EXP_ASSAY; CONTROLLED_VAR; MEASURED_VAR; | 该数据集基于 [SourceData](https://sourcedata.embo.org) 数据库的内容,其中包含用英语编写的手动注释的图形图例,并从细胞和分子生物学领域的科学论文中提取。 | [EMBO/sd-nlp-non-tokenized](https://huggingface.co/datasets/EMBO/sd-nlp-non-tokenized) | | xtreme_en | | train: 7.5K; valid: 750; production: 21.8K; | PER; ORG; LOC; | 它由一个大型电影评论数据集和一些来自酒店评论数据集的评论组成。 | [arize-ai/xtreme_en](https://huggingface.co/datasets/arize-ai/xtreme_en) | | tner | [asahi417/tner](https://github.com/asahi417/tner); [tner](https://huggingface.co/tner) | | | T-NER:用于基于 Transformer 的命名实体识别的全面 Python 库。其中包含了很多的数据集在他的 huggingface 账号。 | | #### 西班牙语实体识别 | 数据 | 原始数据/项目地址 | 样本个数 | 实体类型 | 原始数据描述 | 替代数据下载地址 | | :--- | :---: | :---: | :---: | :---: | :---: | | ehealth_kd | [eHealth-KD 2020](https://ceur-ws.org/Vol-2664/eHealth-KD_overview.pdf); [Testing data](https://github.com/knowledge-learning/ehealthkd-2020/tree/master/data/testing) | train: 800; valid: 199; test: 100 | Concept, Action, Predicate, Reference | IberLEF 2020 上 eHealth-KD 挑战赛的数据集。它旨在识别西班牙健康文档中的语义实体和关系。 | [ehealth_kd](https://huggingface.co/datasets/ehealth_kd) | ### 提示工程指南 https://www.promptingguide.ai/zh https://prompt-engineering.xiniushu.com/ https://learnprompting.org/zh-Hans/docs/basics/intro ### 参考来源 <details> <summary>参考的数据来源,展开查看</summary> <pre><code> [ttxy/cn_ner](https://huggingface.co/datasets/ttxy/cn_ner) [xusenlin/clue-ner](https://huggingface.co/datasets/xusenlin/clue-ner) [xusenlin/people-daily-ner](https://huggingface.co/datasets/xusenlin/people-daily-ner) [peoples_daily_ner](https://huggingface.co/datasets/peoples_daily_ner) [weibo_ner](https://huggingface.co/datasets/weibo_ner) [Rosenberg/weibo_ner](https://huggingface.co/datasets/Rosenberg/weibo_ner) [OneFly/NER](https://huggingface.co/datasets/OneFly/NER) [djagatiya/ner-ontonotes-v5-eng-v4](https://huggingface.co/datasets/djagatiya/ner-ontonotes-v5-eng-v4) [Adapting/chinese_biomedical_NER_dataset](https://huggingface.co/datasets/Adapting/chinese_biomedical_NER_dataset) [nlhappy/CLUE-NER](https://huggingface.co/datasets/nlhappy/CLUE-NER) [ttxy/resume_ner](https://huggingface.co/datasets/ttxy/resume_ner) [doushabao4766/ccks_2019_ner_k_V3_wc](https://huggingface.co/datasets/doushabao4766/ccks_2019_ner_k_V3_wc) </code></pre> </details>
qgyd2021/few_shot_ner_sft
[ "license:apache-2.0", "arxiv:2004.01401", "arxiv:2204.12061", "region:us" ]
2023-12-12T07:23:11+00:00
{"license": "apache-2.0"}
2023-12-27T02:25:23+00:00
[ "2004.01401", "2204.12061" ]
[]
TAGS #license-apache-2.0 #arxiv-2004.01401 #arxiv-2204.12061 #region-us
小样本实体识别 ------- 收集实体识别的数据集, 将其整理成 prompt-response 的形式. 基于语言模型的实体识别. 该数据集可用于: 1. 指令语言模型训练. 2. 数据集创建. (特定领域有少量标注数据时, 可与此数据集一起训练模型, 然后生成样本用于数据标注). 在 prompt 生成过程中会加入一些 '示例', 我们尽量使各实体的标签满足 'n\_way, n\_shot'. ### 样本示例 目前有三种实体标注的格式: * (1)句子重写. 比如 '"今天天气怎样"' 改写为 '"今天天气怎么"'. 这种方式的好处是能够从结果推断出实体的具体位置. * (2)json格式. 比如 '"今天天气怎样"' 输出 '{"entity\_text": "今天", "entity\_type": "date"}'. 这种方式输出的 json 字符串可能会有重复的. 因为句子中可能会有两个一样的实体. * (3)实体类型 - 实体原始字符串. 比如 '"今天天气怎样"' 输出 'date: 今天.'. 这种方式输出的实体, 每行文本以 '实体类型' 开始, 冒号 ':' 后面接实体原始字符串 '今天'. 如果 '实体原始字符串' 中有重复的, 目前会去重复. cmeee\_prompt ``` 实体识别. 请你对句子进行重写并标注出其中包含的实体. 实体有: - microbe - drug - procedure - item - disease 示例: ------ text: (二)感染因素病原微生物能否引起肠道感染,取决于宿主防御功能的强弱、感染量的大小以及微生物毒力(黏附性、产毒性及侵袭性)、细胞毒性,其中微生物的黏附能力对于肠道感染至关重要。 rewrited: (二)<disease>感染</disease>因素<microbe>病原微生物</microbe>能否引起<disease>肠道感染</disease>,取决于宿主<item>防御功能</item>的强弱、<item>感染量</item>的大小以及<microbe>微生物</microbe>毒力(黏附性、产毒性及侵袭性)、细胞毒性,其中<microbe>微生物</microbe>的黏附能力对于<disease>肠道感染</disease>至关重要。 ------ text: (三)幽门螺杆菌阴性消化性溃疡的传统治疗在下述药物中,以H2</sub>受体阻滞剂应用最多,其机制为抑制组胺对壁细胞的泌酸作用,但对于胆碱能神经或胃泌素合并的餐后胃酸分泌影响较小。 rewrited: (三)<disease>幽门螺杆菌阴性消化性溃疡</disease>的<procedure>传统治疗</procedure>在下述药物中,以<drug>H2</sub>受体阻滞剂</drug>应用最多,其机制为抑制组胺对壁细胞的泌酸作用,但对于胆碱能神经或胃泌素合并的餐后胃酸分泌影响较小。 ------ text: 用本法显影受肾功能的影响很小,有人估计,只要残留肾功能在3%以上,本法即可勉强显影。 rewrited: ``` cmeee\_prompt ``` 请对句子进行重写以标注出其中的 “药剂”,“科室”,“发病症状” 实体。 示例: text:参考文献1.胡亚美,江载芳.诸福棠实用儿科学.第7版.北京:人民卫生出版社,20022.江载芳.实用小儿呼吸病学.北京:人民卫生出版社,2010:2253.中华医学会儿科分会呼吸学组.儿童侵袭性肺部真菌感染诊治指南(儿童侵袭性真菌感染州医学.2009,22(3):183-1834.肖晶,周东风,孟浦.儿童侵袭性真菌感染早期诊断的研究进展.国际儿科学杂志,2009,36(5):523-5235.KlontRR,MenninkKerstenMH,RuegebrinkW,etal.ParadoxicalincreaseincirculatingHspergillusantigenduringtreatmentwithcaspofungininapatientwithpulmonaryaspergillosis.ClinInfectDis,2006,43(3):23-236.AgarwalR.Allergicbronchopulmonaryaspergillosis.Chest,2009,135:805-8267.ChabraSK,SahayandS,RamarajuK.Allergicbronchopulmonaryaspergillosiscomplicatingchildhoodasthma.IndianJPediatric,2009,76(3):331-331 rewrited:参考文献1.胡亚美,江载芳.诸福棠实用<科室>儿科</科室>学.第7版.北京:人民卫生出版社,20022.江载芳.实用小儿呼吸病学.北京:人民卫生出版社,2010:2253.中华医学会<科室>儿科</科室>分会呼吸学组.儿童侵袭性肺部真菌感染诊治指南(儿童侵袭性真菌感染州医学.2009,22(3):183-1834.肖晶,周东风,孟浦.儿童侵袭性真菌感染早期诊断的研究进展.国际<科室>儿科</科室>学杂志,2009,36(5):523-5235.KlontRR,MenninkKerstenMH,RuegebrinkW,etal.ParadoxicalincreaseincirculatingHspergillusantigenduringtreatmentwithcaspofungininapatientwithpulmonaryaspergillosis.ClinInfectDis,2006,43(3):23-236.AgarwalR.Allergicbronchopulmonaryaspergillosis.Chest,2009,135:805-8267.ChabraSK,SahayandS,RamarajuK.Allergicbronchopulmonaryaspergillosiscomplicatingchildhoodasthma.IndianJPediatric,2009,76(3):331-331 text:缘于1+月前患者因“CINIII级”在我院行“LEEP”术,术后病理报告(2014.10.27):(宫颈组织)宫颈粘膜慢性炎伴纳氏囊肿、糜烂,鳞状下皮CINIII级累及腺体,并伴局部区域微小浸润,宫颈管切端未见病变累及。建议行“残余子宫切除术”。遂今就诊我院,要求住院手术治疗,无阴道出血、异常排液,无发热、腹痛、腹胀、尿频、尿急等不适,故门诊拟“CINIII级”收入院。发病以来精神、睡眠、食欲尚可,大便如下述,小便正常,体重有明显减轻。 outputs: {"text": "“LEEP”术", "label": "手术"} {"text": "宫颈管", "label": "解剖部位"} {"text": "残余子宫切除术", "label": "手术"} {"text": "阴道", "label": "解剖部位"} {"text": "腹", "label": "解剖部位"} {"text": "腹", "label": "解剖部位"} raw\_text:3月17日,发现客户信用卡被人冒名补办的银行方,向督院街派出所报了案。案侦民警立即开始调查取证, outputs: government:向督院街派出所。 raw\_text:金石开:从往绩来看,阿森纳对桑德兰拥有压倒性的优势,英超以来交手全部获胜。不过,上赛季枪手两战“ outputs: organization:枪手;英超;桑德兰;阿森纳。 随后报警。警方调取银行录像,认出取钱者为周清来,并将其抓获。周清来交代,取走的6. raw\_text:3月17日,发现客户信用卡被人冒名补办的银行方,向督院街派出所报了案。案侦民警立即开始调查取证, outputs: government:向督院街派出所。 raw\_text:金石开:从往绩来看,阿森纳对桑德兰拥有压倒性的优势,英超以来交手全部获胜。不过,上赛季枪手两战“ outputs: organization:枪手;英超;桑德兰;阿森纳。 随后报警。警方调取银行录像,认出取钱者为周清来,并将其抓获。周清来交代,取走的6. text:蜜蜂产品与保健,书籍,保养保健 outputs: {"entity\_text": "书籍", "entity\_type": "商品"} text:足量,qd,mmc,plus卡,512m,内存卡,相机/老款手机,mmc,512m一体卡 outputs: {"entity\_text": "qd", "entity\_type": "型号"} {"entity\_text": "mmc", "entity\_type": "型号"} {"entity\_text": "plus", "entity\_type": "型号"} {"entity\_text": "卡", "entity\_type": "商品"} {"entity\_text": "内存卡", "entity\_type": "商品"} {"entity\_text": "相机", "entity\_type": "商品"} {"entity\_text": "手机", "entity\_type": "商品"} {"entity\_text": "mmc", "entity\_type": "型号"} {"entity\_text": "512m", "entity\_type": "型号"} {"entity\_text": "一体卡", "entity\_type": "商品"} text:放一首儿歌给我放一首儿歌 outputs: Age:儿歌。 text:导航去茌平 outputs: Destination:茌平。 text:想听齐秦齐豫的心经 outputs: Singer:齐豫;齐秦。 text:串烧乐曲 outputs: Style:串烧。 text:我要去公司 outputs: Custom Destination:公司。 text:萨克斯 outputs: Instrument:萨克斯。 ''' text:播放广东雨神的广东爱情故事 outputs: ``` nlpcc2018\_task4\_prompt ``` 虚拟助手槽位提取。 请你对句子进行重写并标注出其中包含的槽值。 需要的槽值有:乐器名称,语言,年代。 示例: ### text:古筝音乐 rewrited:<乐器名称>古筝</乐器名称>音乐 ### text:播放古筝 rewrited:播放<乐器名称>古筝</乐器名称> ### text:多放一点dj dj的中文歌曲 rewrited:多放一点dj dj的<语言>中文</语言>歌曲 ### text:播放藏族歌曲 rewrited:播放<语言>藏族</语言>歌曲 ### text:播放儿歌蓝皮鼠和大脸猫的主题曲 rewrited:播放<年代>儿歌</年代>蓝皮鼠和大脸猫的主题曲 ### text:播放儿童歌曲 rewrited:播放<年代>儿童</年代>歌曲 ### text:播放前小白的关灯与神 rewrited: 播放前小白的关灯与神 ``` ### 数据来源 #### 汉语实体识别 #### 英语实体识别 #### 西班牙语实体识别 ### 提示工程指南 URL URL URL ### 参考来源 参考的数据来源,展开查看 ``` ttxy/cn_ner xusenlin/clue-ner xusenlin/people-daily-ner peoples_daily_ner weibo_ner Rosenberg/weibo_ner OneFly/NER djagatiya/ner-ontonotes-v5-eng-v4 Adapting/chinese_biomedical_NER_dataset nlhappy/CLUE-NER ttxy/resume_ner doushabao4766/ccks_2019_ner_k_V3_wc ```
[ "### 样本示例\n\n\n目前有三种实体标注的格式:\n\n\n* (1)句子重写.\n\n\n比如 '\"今天天气怎样\"' 改写为 '\"今天天气怎么\"'.\n\n\n这种方式的好处是能够从结果推断出实体的具体位置.\n\n\n* (2)json格式.\n\n\n比如 '\"今天天气怎样\"' 输出 '{\"entity\\_text\": \"今天\", \"entity\\_type\": \"date\"}'.\n\n\n这种方式输出的 json 字符串可能会有重复的. 因为句子中可能会有两个一样的实体.\n\n\n* (3)实体类型 - 实体原始字符串.\n\n\n比如 '\"今天天气怎样\"' 输出 'date: 今天.'.\n\n\n这种方式输出的实体, 每行文本以 '实体类型' 开始, 冒号 ':' 后面接实体原始字符串 '今天'.\n\n\n如果 '实体原始字符串' 中有重复的, 目前会去重复.\n\n\n\ncmeee\\_prompt\n\n```\n实体识别. \n\n请你对句子进行重写并标注出其中包含的实体. \n\n实体有:\n- microbe\n- drug\n- procedure\n- item\n- disease \n\n示例:\n------\ntext: (二)感染因素病原微生物能否引起肠道感染,取决于宿主防御功能的强弱、感染量的大小以及微生物毒力(黏附性、产毒性及侵袭性)、细胞毒性,其中微生物的黏附能力对于肠道感染至关重要。\nrewrited: (二)<disease>感染</disease>因素<microbe>病原微生物</microbe>能否引起<disease>肠道感染</disease>,取决于宿主<item>防御功能</item>的强弱、<item>感染量</item>的大小以及<microbe>微生物</microbe>毒力(黏附性、产毒性及侵袭性)、细胞毒性,其中<microbe>微生物</microbe>的黏附能力对于<disease>肠道感染</disease>至关重要。\n------\ntext: (三)幽门螺杆菌阴性消化性溃疡的传统治疗在下述药物中,以H2</sub>受体阻滞剂应用最多,其机制为抑制组胺对壁细胞的泌酸作用,但对于胆碱能神经或胃泌素合并的餐后胃酸分泌影响较小。\nrewrited: (三)<disease>幽门螺杆菌阴性消化性溃疡</disease>的<procedure>传统治疗</procedure>在下述药物中,以<drug>H2</sub>受体阻滞剂</drug>应用最多,其机制为抑制组胺对壁细胞的泌酸作用,但对于胆碱能神经或胃泌素合并的餐后胃酸分泌影响较小。\n------\ntext: 用本法显影受肾功能的影响很小,有人估计,只要残留肾功能在3%以上,本法即可勉强显影。\nrewrited:\n\n```\n\n\n\ncmeee\\_prompt\n\n```\n请对句子进行重写以标注出其中的 “药剂”,“科室”,“发病症状” 实体。 \n\n示例:\ntext:参考文献1.胡亚美,江载芳.诸福棠实用儿科学.第7版.北京:人民卫生出版社,20022.江载芳.实用小儿呼吸病学.北京:人民卫生出版社,2010:2253.中华医学会儿科分会呼吸学组.儿童侵袭性肺部真菌感染诊治指南(儿童侵袭性真菌感染州医学.2009,22(3):183-1834.肖晶,周东风,孟浦.儿童侵袭性真菌感染早期诊断的研究进展.国际儿科学杂志,2009,36(5):523-5235.KlontRR,MenninkKerstenMH,RuegebrinkW,etal.ParadoxicalincreaseincirculatingHspergillusantigenduringtreatmentwithcaspofungininapatientwithpulmonaryaspergillosis.ClinInfectDis,2006,43(3):23-236.AgarwalR.Allergicbronchopulmonaryaspergillosis.Chest,2009,135:805-8267.ChabraSK,SahayandS,RamarajuK.Allergicbronchopulmonaryaspergillosiscomplicatingchildhoodasthma.IndianJPediatric,2009,76(3):331-331\nrewrited:参考文献1.胡亚美,江载芳.诸福棠实用<科室>儿科</科室>学.第7版.北京:人民卫生出版社,20022.江载芳.实用小儿呼吸病学.北京:人民卫生出版社,2010:2253.中华医学会<科室>儿科</科室>分会呼吸学组.儿童侵袭性肺部真菌感染诊治指南(儿童侵袭性真菌感染州医学.2009,22(3):183-1834.肖晶,周东风,孟浦.儿童侵袭性真菌感染早期诊断的研究进展.国际<科室>儿科</科室>学杂志,2009,36(5):523-5235.KlontRR,MenninkKerstenMH,RuegebrinkW,etal.ParadoxicalincreaseincirculatingHspergillusantigenduringtreatmentwithcaspofungininapatientwithpulmonaryaspergillosis.ClinInfectDis,2006,43(3):23-236.AgarwalR.Allergicbronchopulmonaryaspergillosis.Chest,2009,135:805-8267.ChabraSK,SahayandS,RamarajuK.Allergicbronchopulmonaryaspergillosiscomplicatingchildhoodasthma.IndianJPediatric,2009,76(3):331-331\n\n\ntext:缘于1+月前患者因“CINIII级”在我院行“LEEP”术,术后病理报告(2014.10.27):(宫颈组织)宫颈粘膜慢性炎伴纳氏囊肿、糜烂,鳞状下皮CINIII级累及腺体,并伴局部区域微小浸润,宫颈管切端未见病变累及。建议行“残余子宫切除术”。遂今就诊我院,要求住院手术治疗,无阴道出血、异常排液,无发热、腹痛、腹胀、尿频、尿急等不适,故门诊拟“CINIII级”收入院。发病以来精神、睡眠、食欲尚可,大便如下述,小便正常,体重有明显减轻。\noutputs:\n{\"text\": \"“LEEP”术\", \"label\": \"手术\"}\n{\"text\": \"宫颈管\", \"label\": \"解剖部位\"}\n{\"text\": \"残余子宫切除术\", \"label\": \"手术\"}\n{\"text\": \"阴道\", \"label\": \"解剖部位\"}\n{\"text\": \"腹\", \"label\": \"解剖部位\"}\n{\"text\": \"腹\", \"label\": \"解剖部位\"}\n\n\nraw\\_text:3月17日,发现客户信用卡被人冒名补办的银行方,向督院街派出所报了案。案侦民警立即开始调查取证,\noutputs:\ngovernment:向督院街派出所。\n\n\nraw\\_text:金石开:从往绩来看,阿森纳对桑德兰拥有压倒性的优势,英超以来交手全部获胜。不过,上赛季枪手两战“\noutputs:\norganization:枪手;英超;桑德兰;阿森纳。\n\n\n随后报警。警方调取银行录像,认出取钱者为周清来,并将其抓获。周清来交代,取走的6.\n\n\nraw\\_text:3月17日,发现客户信用卡被人冒名补办的银行方,向督院街派出所报了案。案侦民警立即开始调查取证,\noutputs:\ngovernment:向督院街派出所。\n\n\nraw\\_text:金石开:从往绩来看,阿森纳对桑德兰拥有压倒性的优势,英超以来交手全部获胜。不过,上赛季枪手两战“\noutputs:\norganization:枪手;英超;桑德兰;阿森纳。\n\n\n随后报警。警方调取银行录像,认出取钱者为周清来,并将其抓获。周清来交代,取走的6.\n\n\ntext:蜜蜂产品与保健,书籍,保养保健\noutputs:\n{\"entity\\_text\": \"书籍\", \"entity\\_type\": \"商品\"}\n\n\ntext:足量,qd,mmc,plus卡,512m,内存卡,相机/老款手机,mmc,512m一体卡\noutputs:\n{\"entity\\_text\": \"qd\", \"entity\\_type\": \"型号\"}\n{\"entity\\_text\": \"mmc\", \"entity\\_type\": \"型号\"}\n{\"entity\\_text\": \"plus\", \"entity\\_type\": \"型号\"}\n{\"entity\\_text\": \"卡\", \"entity\\_type\": \"商品\"}\n{\"entity\\_text\": \"内存卡\", \"entity\\_type\": \"商品\"}\n{\"entity\\_text\": \"相机\", \"entity\\_type\": \"商品\"}\n{\"entity\\_text\": \"手机\", \"entity\\_type\": \"商品\"}\n{\"entity\\_text\": \"mmc\", \"entity\\_type\": \"型号\"}\n{\"entity\\_text\": \"512m\", \"entity\\_type\": \"型号\"}\n{\"entity\\_text\": \"一体卡\", \"entity\\_type\": \"商品\"}\n\n\ntext:放一首儿歌给我放一首儿歌\noutputs:\nAge:儿歌。\n\n\ntext:导航去茌平\noutputs:\nDestination:茌平。\n\n\ntext:想听齐秦齐豫的心经\noutputs:\nSinger:齐豫;齐秦。\n\n\ntext:串烧乐曲\noutputs:\nStyle:串烧。\n\n\ntext:我要去公司\noutputs:\nCustom Destination:公司。\n\n\ntext:萨克斯\noutputs:\nInstrument:萨克斯。\n'''\ntext:播放广东雨神的广东爱情故事\noutputs:\n\n\n\n```\n\n\n\nnlpcc2018\\_task4\\_prompt\n\n```\n虚拟助手槽位提取。 \n\n请你对句子进行重写并标注出其中包含的槽值。 \n\n需要的槽值有:乐器名称,语言,年代。 \n\n示例:", "### 数据来源", "#### 汉语实体识别", "#### 英语实体识别", "#### 西班牙语实体识别", "### 提示工程指南\n\n\nURL\n\n\nURL\n\n\nURL", "### 参考来源\n\n\n\n参考的数据来源,展开查看\n\n```\n\nttxy/cn_ner\nxusenlin/clue-ner\nxusenlin/people-daily-ner\npeoples_daily_ner\nweibo_ner\nRosenberg/weibo_ner\nOneFly/NER\ndjagatiya/ner-ontonotes-v5-eng-v4\nAdapting/chinese_biomedical_NER_dataset\nnlhappy/CLUE-NER\nttxy/resume_ner\ndoushabao4766/ccks_2019_ner_k_V3_wc\n\n```" ]
[ "TAGS\n#license-apache-2.0 #arxiv-2004.01401 #arxiv-2204.12061 #region-us \n", "### 样本示例\n\n\n目前有三种实体标注的格式:\n\n\n* (1)句子重写.\n\n\n比如 '\"今天天气怎样\"' 改写为 '\"今天天气怎么\"'.\n\n\n这种方式的好处是能够从结果推断出实体的具体位置.\n\n\n* (2)json格式.\n\n\n比如 '\"今天天气怎样\"' 输出 '{\"entity\\_text\": \"今天\", \"entity\\_type\": \"date\"}'.\n\n\n这种方式输出的 json 字符串可能会有重复的. 因为句子中可能会有两个一样的实体.\n\n\n* (3)实体类型 - 实体原始字符串.\n\n\n比如 '\"今天天气怎样\"' 输出 'date: 今天.'.\n\n\n这种方式输出的实体, 每行文本以 '实体类型' 开始, 冒号 ':' 后面接实体原始字符串 '今天'.\n\n\n如果 '实体原始字符串' 中有重复的, 目前会去重复.\n\n\n\ncmeee\\_prompt\n\n```\n实体识别. \n\n请你对句子进行重写并标注出其中包含的实体. \n\n实体有:\n- microbe\n- drug\n- procedure\n- item\n- disease \n\n示例:\n------\ntext: (二)感染因素病原微生物能否引起肠道感染,取决于宿主防御功能的强弱、感染量的大小以及微生物毒力(黏附性、产毒性及侵袭性)、细胞毒性,其中微生物的黏附能力对于肠道感染至关重要。\nrewrited: (二)<disease>感染</disease>因素<microbe>病原微生物</microbe>能否引起<disease>肠道感染</disease>,取决于宿主<item>防御功能</item>的强弱、<item>感染量</item>的大小以及<microbe>微生物</microbe>毒力(黏附性、产毒性及侵袭性)、细胞毒性,其中<microbe>微生物</microbe>的黏附能力对于<disease>肠道感染</disease>至关重要。\n------\ntext: (三)幽门螺杆菌阴性消化性溃疡的传统治疗在下述药物中,以H2</sub>受体阻滞剂应用最多,其机制为抑制组胺对壁细胞的泌酸作用,但对于胆碱能神经或胃泌素合并的餐后胃酸分泌影响较小。\nrewrited: (三)<disease>幽门螺杆菌阴性消化性溃疡</disease>的<procedure>传统治疗</procedure>在下述药物中,以<drug>H2</sub>受体阻滞剂</drug>应用最多,其机制为抑制组胺对壁细胞的泌酸作用,但对于胆碱能神经或胃泌素合并的餐后胃酸分泌影响较小。\n------\ntext: 用本法显影受肾功能的影响很小,有人估计,只要残留肾功能在3%以上,本法即可勉强显影。\nrewrited:\n\n```\n\n\n\ncmeee\\_prompt\n\n```\n请对句子进行重写以标注出其中的 “药剂”,“科室”,“发病症状” 实体。 \n\n示例:\ntext:参考文献1.胡亚美,江载芳.诸福棠实用儿科学.第7版.北京:人民卫生出版社,20022.江载芳.实用小儿呼吸病学.北京:人民卫生出版社,2010:2253.中华医学会儿科分会呼吸学组.儿童侵袭性肺部真菌感染诊治指南(儿童侵袭性真菌感染州医学.2009,22(3):183-1834.肖晶,周东风,孟浦.儿童侵袭性真菌感染早期诊断的研究进展.国际儿科学杂志,2009,36(5):523-5235.KlontRR,MenninkKerstenMH,RuegebrinkW,etal.ParadoxicalincreaseincirculatingHspergillusantigenduringtreatmentwithcaspofungininapatientwithpulmonaryaspergillosis.ClinInfectDis,2006,43(3):23-236.AgarwalR.Allergicbronchopulmonaryaspergillosis.Chest,2009,135:805-8267.ChabraSK,SahayandS,RamarajuK.Allergicbronchopulmonaryaspergillosiscomplicatingchildhoodasthma.IndianJPediatric,2009,76(3):331-331\nrewrited:参考文献1.胡亚美,江载芳.诸福棠实用<科室>儿科</科室>学.第7版.北京:人民卫生出版社,20022.江载芳.实用小儿呼吸病学.北京:人民卫生出版社,2010:2253.中华医学会<科室>儿科</科室>分会呼吸学组.儿童侵袭性肺部真菌感染诊治指南(儿童侵袭性真菌感染州医学.2009,22(3):183-1834.肖晶,周东风,孟浦.儿童侵袭性真菌感染早期诊断的研究进展.国际<科室>儿科</科室>学杂志,2009,36(5):523-5235.KlontRR,MenninkKerstenMH,RuegebrinkW,etal.ParadoxicalincreaseincirculatingHspergillusantigenduringtreatmentwithcaspofungininapatientwithpulmonaryaspergillosis.ClinInfectDis,2006,43(3):23-236.AgarwalR.Allergicbronchopulmonaryaspergillosis.Chest,2009,135:805-8267.ChabraSK,SahayandS,RamarajuK.Allergicbronchopulmonaryaspergillosiscomplicatingchildhoodasthma.IndianJPediatric,2009,76(3):331-331\n\n\ntext:缘于1+月前患者因“CINIII级”在我院行“LEEP”术,术后病理报告(2014.10.27):(宫颈组织)宫颈粘膜慢性炎伴纳氏囊肿、糜烂,鳞状下皮CINIII级累及腺体,并伴局部区域微小浸润,宫颈管切端未见病变累及。建议行“残余子宫切除术”。遂今就诊我院,要求住院手术治疗,无阴道出血、异常排液,无发热、腹痛、腹胀、尿频、尿急等不适,故门诊拟“CINIII级”收入院。发病以来精神、睡眠、食欲尚可,大便如下述,小便正常,体重有明显减轻。\noutputs:\n{\"text\": \"“LEEP”术\", \"label\": \"手术\"}\n{\"text\": \"宫颈管\", \"label\": \"解剖部位\"}\n{\"text\": \"残余子宫切除术\", \"label\": \"手术\"}\n{\"text\": \"阴道\", \"label\": \"解剖部位\"}\n{\"text\": \"腹\", \"label\": \"解剖部位\"}\n{\"text\": \"腹\", \"label\": \"解剖部位\"}\n\n\nraw\\_text:3月17日,发现客户信用卡被人冒名补办的银行方,向督院街派出所报了案。案侦民警立即开始调查取证,\noutputs:\ngovernment:向督院街派出所。\n\n\nraw\\_text:金石开:从往绩来看,阿森纳对桑德兰拥有压倒性的优势,英超以来交手全部获胜。不过,上赛季枪手两战“\noutputs:\norganization:枪手;英超;桑德兰;阿森纳。\n\n\n随后报警。警方调取银行录像,认出取钱者为周清来,并将其抓获。周清来交代,取走的6.\n\n\nraw\\_text:3月17日,发现客户信用卡被人冒名补办的银行方,向督院街派出所报了案。案侦民警立即开始调查取证,\noutputs:\ngovernment:向督院街派出所。\n\n\nraw\\_text:金石开:从往绩来看,阿森纳对桑德兰拥有压倒性的优势,英超以来交手全部获胜。不过,上赛季枪手两战“\noutputs:\norganization:枪手;英超;桑德兰;阿森纳。\n\n\n随后报警。警方调取银行录像,认出取钱者为周清来,并将其抓获。周清来交代,取走的6.\n\n\ntext:蜜蜂产品与保健,书籍,保养保健\noutputs:\n{\"entity\\_text\": \"书籍\", \"entity\\_type\": \"商品\"}\n\n\ntext:足量,qd,mmc,plus卡,512m,内存卡,相机/老款手机,mmc,512m一体卡\noutputs:\n{\"entity\\_text\": \"qd\", \"entity\\_type\": \"型号\"}\n{\"entity\\_text\": \"mmc\", \"entity\\_type\": \"型号\"}\n{\"entity\\_text\": \"plus\", \"entity\\_type\": \"型号\"}\n{\"entity\\_text\": \"卡\", \"entity\\_type\": \"商品\"}\n{\"entity\\_text\": \"内存卡\", \"entity\\_type\": \"商品\"}\n{\"entity\\_text\": \"相机\", \"entity\\_type\": \"商品\"}\n{\"entity\\_text\": \"手机\", \"entity\\_type\": \"商品\"}\n{\"entity\\_text\": \"mmc\", \"entity\\_type\": \"型号\"}\n{\"entity\\_text\": \"512m\", \"entity\\_type\": \"型号\"}\n{\"entity\\_text\": \"一体卡\", \"entity\\_type\": \"商品\"}\n\n\ntext:放一首儿歌给我放一首儿歌\noutputs:\nAge:儿歌。\n\n\ntext:导航去茌平\noutputs:\nDestination:茌平。\n\n\ntext:想听齐秦齐豫的心经\noutputs:\nSinger:齐豫;齐秦。\n\n\ntext:串烧乐曲\noutputs:\nStyle:串烧。\n\n\ntext:我要去公司\noutputs:\nCustom Destination:公司。\n\n\ntext:萨克斯\noutputs:\nInstrument:萨克斯。\n'''\ntext:播放广东雨神的广东爱情故事\noutputs:\n\n\n\n```\n\n\n\nnlpcc2018\\_task4\\_prompt\n\n```\n虚拟助手槽位提取。 \n\n请你对句子进行重写并标注出其中包含的槽值。 \n\n需要的槽值有:乐器名称,语言,年代。 \n\n示例:", "### 数据来源", "#### 汉语实体识别", "#### 英语实体识别", "#### 西班牙语实体识别", "### 提示工程指南\n\n\nURL\n\n\nURL\n\n\nURL", "### 参考来源\n\n\n\n参考的数据来源,展开查看\n\n```\n\nttxy/cn_ner\nxusenlin/clue-ner\nxusenlin/people-daily-ner\npeoples_daily_ner\nweibo_ner\nRosenberg/weibo_ner\nOneFly/NER\ndjagatiya/ner-ontonotes-v5-eng-v4\nAdapting/chinese_biomedical_NER_dataset\nnlhappy/CLUE-NER\nttxy/resume_ner\ndoushabao4766/ccks_2019_ner_k_V3_wc\n\n```" ]
[ 30, 2431, 5, 6, 6, 7, 9, 128 ]
[ "passage: TAGS\n#license-apache-2.0 #arxiv-2004.01401 #arxiv-2204.12061 #region-us \n" ]
e77c177c687058e31a139cebba1fa868e9913b70
# Dataset Card for Evaluation run of janhq/supermario-slerp-v2 <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [janhq/supermario-slerp-v2](https://huggingface.co/janhq/supermario-slerp-v2) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_janhq__supermario-slerp-v2", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-12T07:20:29.210830](https://huggingface.co/datasets/open-llm-leaderboard/details_janhq__supermario-slerp-v2/blob/main/results_2023-12-12T07-20-29.210830.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.6523192880412831, "acc_stderr": 0.031929152104876686, "acc_norm": 0.6535004820907845, "acc_norm_stderr": 0.03257318036897926, "mc1": 0.47613219094247244, "mc1_stderr": 0.017483547156961574, "mc2": 0.6295900737174474, "mc2_stderr": 0.015194573521166509 }, "harness|arc:challenge|25": { "acc": 0.6680887372013652, "acc_stderr": 0.01376098820088054, "acc_norm": 0.6936860068259386, "acc_norm_stderr": 0.013470584417276513 }, "harness|hellaswag|10": { "acc": 0.6837283409679347, "acc_stderr": 0.004640699483543311, "acc_norm": 0.8659629555865366, "acc_norm_stderr": 0.003399958334372066 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.28, "acc_stderr": 0.045126085985421276, "acc_norm": 0.28, "acc_norm_stderr": 0.045126085985421276 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.6444444444444445, "acc_stderr": 0.04135176749720385, "acc_norm": 0.6444444444444445, "acc_norm_stderr": 0.04135176749720385 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.7236842105263158, "acc_stderr": 0.03639057569952928, "acc_norm": 0.7236842105263158, "acc_norm_stderr": 0.03639057569952928 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.61, "acc_stderr": 0.04902071300001975, "acc_norm": 0.61, "acc_norm_stderr": 0.04902071300001975 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.6981132075471698, "acc_stderr": 0.028254200344438662, "acc_norm": 0.6981132075471698, "acc_norm_stderr": 0.028254200344438662 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.7638888888888888, "acc_stderr": 0.03551446610810826, "acc_norm": 0.7638888888888888, "acc_norm_stderr": 0.03551446610810826 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.46, "acc_stderr": 0.05009082659620333, "acc_norm": 0.46, "acc_norm_stderr": 0.05009082659620333 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.55, "acc_stderr": 0.05, "acc_norm": 0.55, "acc_norm_stderr": 0.05 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.3, "acc_stderr": 0.046056618647183814, "acc_norm": 0.3, "acc_norm_stderr": 0.046056618647183814 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.6589595375722543, "acc_stderr": 0.03614665424180826, "acc_norm": 0.6589595375722543, "acc_norm_stderr": 0.03614665424180826 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.4117647058823529, "acc_stderr": 0.048971049527263666, "acc_norm": 0.4117647058823529, "acc_norm_stderr": 0.048971049527263666 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.76, "acc_stderr": 0.042923469599092816, "acc_norm": 0.76, "acc_norm_stderr": 0.042923469599092816 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.5872340425531914, "acc_stderr": 0.03218471141400351, "acc_norm": 0.5872340425531914, "acc_norm_stderr": 0.03218471141400351 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.49122807017543857, "acc_stderr": 0.04702880432049615, "acc_norm": 0.49122807017543857, "acc_norm_stderr": 0.04702880432049615 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.5793103448275863, "acc_stderr": 0.0411391498118926, "acc_norm": 0.5793103448275863, "acc_norm_stderr": 0.0411391498118926 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.4126984126984127, "acc_stderr": 0.025355741263055273, "acc_norm": 0.4126984126984127, "acc_norm_stderr": 0.025355741263055273 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.48412698412698413, "acc_stderr": 0.04469881854072606, "acc_norm": 0.48412698412698413, "acc_norm_stderr": 0.04469881854072606 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.36, "acc_stderr": 0.048241815132442176, "acc_norm": 0.36, "acc_norm_stderr": 0.048241815132442176 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.7870967741935484, "acc_stderr": 0.023287665127268552, "acc_norm": 0.7870967741935484, "acc_norm_stderr": 0.023287665127268552 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.4975369458128079, "acc_stderr": 0.03517945038691063, "acc_norm": 0.4975369458128079, "acc_norm_stderr": 0.03517945038691063 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.73, "acc_stderr": 0.044619604333847394, "acc_norm": 0.73, "acc_norm_stderr": 0.044619604333847394 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.7878787878787878, "acc_stderr": 0.03192271569548301, "acc_norm": 0.7878787878787878, "acc_norm_stderr": 0.03192271569548301 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.803030303030303, "acc_stderr": 0.028335609732463362, "acc_norm": 0.803030303030303, "acc_norm_stderr": 0.028335609732463362 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.917098445595855, "acc_stderr": 0.01989934131572178, "acc_norm": 0.917098445595855, "acc_norm_stderr": 0.01989934131572178 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.676923076923077, "acc_stderr": 0.02371088850197057, "acc_norm": 0.676923076923077, "acc_norm_stderr": 0.02371088850197057 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.32592592592592595, "acc_stderr": 0.02857834836547308, "acc_norm": 0.32592592592592595, "acc_norm_stderr": 0.02857834836547308 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.680672268907563, "acc_stderr": 0.030283995525884396, "acc_norm": 0.680672268907563, "acc_norm_stderr": 0.030283995525884396 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.37748344370860926, "acc_stderr": 0.0395802723112157, "acc_norm": 0.37748344370860926, "acc_norm_stderr": 0.0395802723112157 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.8550458715596331, "acc_stderr": 0.015094215699700488, "acc_norm": 0.8550458715596331, "acc_norm_stderr": 0.015094215699700488 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.5, "acc_stderr": 0.034099716973523674, "acc_norm": 0.5, "acc_norm_stderr": 0.034099716973523674 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.8578431372549019, "acc_stderr": 0.02450980392156861, "acc_norm": 0.8578431372549019, "acc_norm_stderr": 0.02450980392156861 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.8270042194092827, "acc_stderr": 0.024621562866768427, "acc_norm": 0.8270042194092827, "acc_norm_stderr": 0.024621562866768427 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.6816143497757847, "acc_stderr": 0.03126580522513713, "acc_norm": 0.6816143497757847, "acc_norm_stderr": 0.03126580522513713 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.7709923664122137, "acc_stderr": 0.036853466317118506, "acc_norm": 0.7709923664122137, "acc_norm_stderr": 0.036853466317118506 }, "harness|hendrycksTest-international_law|5": { "acc": 0.7933884297520661, "acc_stderr": 0.03695980128098824, "acc_norm": 0.7933884297520661, "acc_norm_stderr": 0.03695980128098824 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.7870370370370371, "acc_stderr": 0.0395783547198098, "acc_norm": 0.7870370370370371, "acc_norm_stderr": 0.0395783547198098 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.7730061349693251, "acc_stderr": 0.03291099578615769, "acc_norm": 0.7730061349693251, "acc_norm_stderr": 0.03291099578615769 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.5089285714285714, "acc_stderr": 0.04745033255489123, "acc_norm": 0.5089285714285714, "acc_norm_stderr": 0.04745033255489123 }, "harness|hendrycksTest-management|5": { "acc": 0.8058252427184466, "acc_stderr": 0.039166677628225836, "acc_norm": 0.8058252427184466, "acc_norm_stderr": 0.039166677628225836 }, "harness|hendrycksTest-marketing|5": { "acc": 0.8632478632478633, "acc_stderr": 0.022509033937077802, "acc_norm": 0.8632478632478633, "acc_norm_stderr": 0.022509033937077802 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.72, "acc_stderr": 0.045126085985421276, "acc_norm": 0.72, "acc_norm_stderr": 0.045126085985421276 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.8365261813537676, "acc_stderr": 0.013223928616741624, "acc_norm": 0.8365261813537676, "acc_norm_stderr": 0.013223928616741624 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.7167630057803468, "acc_stderr": 0.02425790170532338, "acc_norm": 0.7167630057803468, "acc_norm_stderr": 0.02425790170532338 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.41675977653631285, "acc_stderr": 0.016489134962438954, "acc_norm": 0.41675977653631285, "acc_norm_stderr": 0.016489134962438954 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.7189542483660131, "acc_stderr": 0.025738854797818737, "acc_norm": 0.7189542483660131, "acc_norm_stderr": 0.025738854797818737 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.7009646302250804, "acc_stderr": 0.026003301117885135, "acc_norm": 0.7009646302250804, "acc_norm_stderr": 0.026003301117885135 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.7160493827160493, "acc_stderr": 0.025089478523765137, "acc_norm": 0.7160493827160493, "acc_norm_stderr": 0.025089478523765137 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.475177304964539, "acc_stderr": 0.02979071924382972, "acc_norm": 0.475177304964539, "acc_norm_stderr": 0.02979071924382972 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.4595827900912647, "acc_stderr": 0.01272844606766997, "acc_norm": 0.4595827900912647, "acc_norm_stderr": 0.01272844606766997 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.6911764705882353, "acc_stderr": 0.02806499816704009, "acc_norm": 0.6911764705882353, "acc_norm_stderr": 0.02806499816704009 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.6683006535947712, "acc_stderr": 0.019047485239360378, "acc_norm": 0.6683006535947712, "acc_norm_stderr": 0.019047485239360378 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.6909090909090909, "acc_stderr": 0.044262946482000985, "acc_norm": 0.6909090909090909, "acc_norm_stderr": 0.044262946482000985 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.7387755102040816, "acc_stderr": 0.028123429335142783, "acc_norm": 0.7387755102040816, "acc_norm_stderr": 0.028123429335142783 }, "harness|hendrycksTest-sociology|5": { "acc": 0.8507462686567164, "acc_stderr": 0.025196929874827072, "acc_norm": 0.8507462686567164, "acc_norm_stderr": 0.025196929874827072 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.87, "acc_stderr": 0.03379976689896309, "acc_norm": 0.87, "acc_norm_stderr": 0.03379976689896309 }, "harness|hendrycksTest-virology|5": { "acc": 0.536144578313253, "acc_stderr": 0.038823108508905954, "acc_norm": 0.536144578313253, "acc_norm_stderr": 0.038823108508905954 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.8070175438596491, "acc_stderr": 0.030267457554898458, "acc_norm": 0.8070175438596491, "acc_norm_stderr": 0.030267457554898458 }, "harness|truthfulqa:mc|0": { "mc1": 0.47613219094247244, "mc1_stderr": 0.017483547156961574, "mc2": 0.6295900737174474, "mc2_stderr": 0.015194573521166509 }, "harness|winogrande|5": { "acc": 0.8082083662194159, "acc_stderr": 0.011065209664659527 }, "harness|gsm8k|5": { "acc": 0.6345716451857468, "acc_stderr": 0.013264282030266635 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
open-llm-leaderboard/details_janhq__supermario-slerp-v2
[ "region:us" ]
2023-12-12T07:23:22+00:00
{"pretty_name": "Evaluation run of janhq/supermario-slerp-v2", "dataset_summary": "Dataset automatically created during the evaluation run of model [janhq/supermario-slerp-v2](https://huggingface.co/janhq/supermario-slerp-v2) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_janhq__supermario-slerp-v2\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-12T07:20:29.210830](https://huggingface.co/datasets/open-llm-leaderboard/details_janhq__supermario-slerp-v2/blob/main/results_2023-12-12T07-20-29.210830.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.6523192880412831,\n \"acc_stderr\": 0.031929152104876686,\n \"acc_norm\": 0.6535004820907845,\n \"acc_norm_stderr\": 0.03257318036897926,\n \"mc1\": 0.47613219094247244,\n \"mc1_stderr\": 0.017483547156961574,\n \"mc2\": 0.6295900737174474,\n \"mc2_stderr\": 0.015194573521166509\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.6680887372013652,\n \"acc_stderr\": 0.01376098820088054,\n \"acc_norm\": 0.6936860068259386,\n \"acc_norm_stderr\": 0.013470584417276513\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6837283409679347,\n \"acc_stderr\": 0.004640699483543311,\n \"acc_norm\": 0.8659629555865366,\n \"acc_norm_stderr\": 0.003399958334372066\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.28,\n \"acc_stderr\": 0.045126085985421276,\n \"acc_norm\": 0.28,\n \"acc_norm_stderr\": 0.045126085985421276\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.6444444444444445,\n \"acc_stderr\": 0.04135176749720385,\n \"acc_norm\": 0.6444444444444445,\n \"acc_norm_stderr\": 0.04135176749720385\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.7236842105263158,\n \"acc_stderr\": 0.03639057569952928,\n \"acc_norm\": 0.7236842105263158,\n \"acc_norm_stderr\": 0.03639057569952928\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.61,\n \"acc_stderr\": 0.04902071300001975,\n \"acc_norm\": 0.61,\n \"acc_norm_stderr\": 0.04902071300001975\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.6981132075471698,\n \"acc_stderr\": 0.028254200344438662,\n \"acc_norm\": 0.6981132075471698,\n \"acc_norm_stderr\": 0.028254200344438662\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.7638888888888888,\n \"acc_stderr\": 0.03551446610810826,\n \"acc_norm\": 0.7638888888888888,\n \"acc_norm_stderr\": 0.03551446610810826\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.46,\n \"acc_stderr\": 0.05009082659620333,\n \"acc_norm\": 0.46,\n \"acc_norm_stderr\": 0.05009082659620333\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.55,\n \"acc_stderr\": 0.05,\n \"acc_norm\": 0.55,\n \"acc_norm_stderr\": 0.05\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.3,\n \"acc_stderr\": 0.046056618647183814,\n \"acc_norm\": 0.3,\n \"acc_norm_stderr\": 0.046056618647183814\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.6589595375722543,\n \"acc_stderr\": 0.03614665424180826,\n \"acc_norm\": 0.6589595375722543,\n \"acc_norm_stderr\": 0.03614665424180826\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.4117647058823529,\n \"acc_stderr\": 0.048971049527263666,\n \"acc_norm\": 0.4117647058823529,\n \"acc_norm_stderr\": 0.048971049527263666\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.76,\n \"acc_stderr\": 0.042923469599092816,\n \"acc_norm\": 0.76,\n \"acc_norm_stderr\": 0.042923469599092816\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.5872340425531914,\n \"acc_stderr\": 0.03218471141400351,\n \"acc_norm\": 0.5872340425531914,\n \"acc_norm_stderr\": 0.03218471141400351\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.49122807017543857,\n \"acc_stderr\": 0.04702880432049615,\n \"acc_norm\": 0.49122807017543857,\n \"acc_norm_stderr\": 0.04702880432049615\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.5793103448275863,\n \"acc_stderr\": 0.0411391498118926,\n \"acc_norm\": 0.5793103448275863,\n \"acc_norm_stderr\": 0.0411391498118926\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.4126984126984127,\n \"acc_stderr\": 0.025355741263055273,\n \"acc_norm\": 0.4126984126984127,\n \"acc_norm_stderr\": 0.025355741263055273\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.48412698412698413,\n \"acc_stderr\": 0.04469881854072606,\n \"acc_norm\": 0.48412698412698413,\n \"acc_norm_stderr\": 0.04469881854072606\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.36,\n \"acc_stderr\": 0.048241815132442176,\n \"acc_norm\": 0.36,\n \"acc_norm_stderr\": 0.048241815132442176\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.7870967741935484,\n \"acc_stderr\": 0.023287665127268552,\n \"acc_norm\": 0.7870967741935484,\n \"acc_norm_stderr\": 0.023287665127268552\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.4975369458128079,\n \"acc_stderr\": 0.03517945038691063,\n \"acc_norm\": 0.4975369458128079,\n \"acc_norm_stderr\": 0.03517945038691063\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.73,\n \"acc_stderr\": 0.044619604333847394,\n \"acc_norm\": 0.73,\n \"acc_norm_stderr\": 0.044619604333847394\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.7878787878787878,\n \"acc_stderr\": 0.03192271569548301,\n \"acc_norm\": 0.7878787878787878,\n \"acc_norm_stderr\": 0.03192271569548301\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.803030303030303,\n \"acc_stderr\": 0.028335609732463362,\n \"acc_norm\": 0.803030303030303,\n \"acc_norm_stderr\": 0.028335609732463362\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.917098445595855,\n \"acc_stderr\": 0.01989934131572178,\n \"acc_norm\": 0.917098445595855,\n \"acc_norm_stderr\": 0.01989934131572178\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.676923076923077,\n \"acc_stderr\": 0.02371088850197057,\n \"acc_norm\": 0.676923076923077,\n \"acc_norm_stderr\": 0.02371088850197057\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.32592592592592595,\n \"acc_stderr\": 0.02857834836547308,\n \"acc_norm\": 0.32592592592592595,\n \"acc_norm_stderr\": 0.02857834836547308\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.680672268907563,\n \"acc_stderr\": 0.030283995525884396,\n \"acc_norm\": 0.680672268907563,\n \"acc_norm_stderr\": 0.030283995525884396\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.37748344370860926,\n \"acc_stderr\": 0.0395802723112157,\n \"acc_norm\": 0.37748344370860926,\n \"acc_norm_stderr\": 0.0395802723112157\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.8550458715596331,\n \"acc_stderr\": 0.015094215699700488,\n \"acc_norm\": 0.8550458715596331,\n \"acc_norm_stderr\": 0.015094215699700488\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.5,\n \"acc_stderr\": 0.034099716973523674,\n \"acc_norm\": 0.5,\n \"acc_norm_stderr\": 0.034099716973523674\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.8578431372549019,\n \"acc_stderr\": 0.02450980392156861,\n \"acc_norm\": 0.8578431372549019,\n \"acc_norm_stderr\": 0.02450980392156861\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.8270042194092827,\n \"acc_stderr\": 0.024621562866768427,\n \"acc_norm\": 0.8270042194092827,\n \"acc_norm_stderr\": 0.024621562866768427\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.6816143497757847,\n \"acc_stderr\": 0.03126580522513713,\n \"acc_norm\": 0.6816143497757847,\n \"acc_norm_stderr\": 0.03126580522513713\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.7709923664122137,\n \"acc_stderr\": 0.036853466317118506,\n \"acc_norm\": 0.7709923664122137,\n \"acc_norm_stderr\": 0.036853466317118506\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.7933884297520661,\n \"acc_stderr\": 0.03695980128098824,\n \"acc_norm\": 0.7933884297520661,\n \"acc_norm_stderr\": 0.03695980128098824\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.7870370370370371,\n \"acc_stderr\": 0.0395783547198098,\n \"acc_norm\": 0.7870370370370371,\n \"acc_norm_stderr\": 0.0395783547198098\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.7730061349693251,\n \"acc_stderr\": 0.03291099578615769,\n \"acc_norm\": 0.7730061349693251,\n \"acc_norm_stderr\": 0.03291099578615769\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.5089285714285714,\n \"acc_stderr\": 0.04745033255489123,\n \"acc_norm\": 0.5089285714285714,\n \"acc_norm_stderr\": 0.04745033255489123\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.8058252427184466,\n \"acc_stderr\": 0.039166677628225836,\n \"acc_norm\": 0.8058252427184466,\n \"acc_norm_stderr\": 0.039166677628225836\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.8632478632478633,\n \"acc_stderr\": 0.022509033937077802,\n \"acc_norm\": 0.8632478632478633,\n \"acc_norm_stderr\": 0.022509033937077802\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.72,\n \"acc_stderr\": 0.045126085985421276,\n \"acc_norm\": 0.72,\n \"acc_norm_stderr\": 0.045126085985421276\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.8365261813537676,\n \"acc_stderr\": 0.013223928616741624,\n \"acc_norm\": 0.8365261813537676,\n \"acc_norm_stderr\": 0.013223928616741624\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.7167630057803468,\n \"acc_stderr\": 0.02425790170532338,\n \"acc_norm\": 0.7167630057803468,\n \"acc_norm_stderr\": 0.02425790170532338\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.41675977653631285,\n \"acc_stderr\": 0.016489134962438954,\n \"acc_norm\": 0.41675977653631285,\n \"acc_norm_stderr\": 0.016489134962438954\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.7189542483660131,\n \"acc_stderr\": 0.025738854797818737,\n \"acc_norm\": 0.7189542483660131,\n \"acc_norm_stderr\": 0.025738854797818737\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.7009646302250804,\n \"acc_stderr\": 0.026003301117885135,\n \"acc_norm\": 0.7009646302250804,\n \"acc_norm_stderr\": 0.026003301117885135\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.7160493827160493,\n \"acc_stderr\": 0.025089478523765137,\n \"acc_norm\": 0.7160493827160493,\n \"acc_norm_stderr\": 0.025089478523765137\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.475177304964539,\n \"acc_stderr\": 0.02979071924382972,\n \"acc_norm\": 0.475177304964539,\n \"acc_norm_stderr\": 0.02979071924382972\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.4595827900912647,\n \"acc_stderr\": 0.01272844606766997,\n \"acc_norm\": 0.4595827900912647,\n \"acc_norm_stderr\": 0.01272844606766997\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.6911764705882353,\n \"acc_stderr\": 0.02806499816704009,\n \"acc_norm\": 0.6911764705882353,\n \"acc_norm_stderr\": 0.02806499816704009\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.6683006535947712,\n \"acc_stderr\": 0.019047485239360378,\n \"acc_norm\": 0.6683006535947712,\n \"acc_norm_stderr\": 0.019047485239360378\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.6909090909090909,\n \"acc_stderr\": 0.044262946482000985,\n \"acc_norm\": 0.6909090909090909,\n \"acc_norm_stderr\": 0.044262946482000985\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.7387755102040816,\n \"acc_stderr\": 0.028123429335142783,\n \"acc_norm\": 0.7387755102040816,\n \"acc_norm_stderr\": 0.028123429335142783\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.8507462686567164,\n \"acc_stderr\": 0.025196929874827072,\n \"acc_norm\": 0.8507462686567164,\n \"acc_norm_stderr\": 0.025196929874827072\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.87,\n \"acc_stderr\": 0.03379976689896309,\n \"acc_norm\": 0.87,\n \"acc_norm_stderr\": 0.03379976689896309\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.536144578313253,\n \"acc_stderr\": 0.038823108508905954,\n \"acc_norm\": 0.536144578313253,\n \"acc_norm_stderr\": 0.038823108508905954\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.8070175438596491,\n \"acc_stderr\": 0.030267457554898458,\n \"acc_norm\": 0.8070175438596491,\n \"acc_norm_stderr\": 0.030267457554898458\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.47613219094247244,\n \"mc1_stderr\": 0.017483547156961574,\n \"mc2\": 0.6295900737174474,\n \"mc2_stderr\": 0.015194573521166509\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.8082083662194159,\n \"acc_stderr\": 0.011065209664659527\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.6345716451857468,\n \"acc_stderr\": 0.013264282030266635\n }\n}\n```", "repo_url": "https://huggingface.co/janhq/supermario-slerp-v2", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|arc:challenge|25_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|gsm8k|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hellaswag|10_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T07-20-29.210830.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["**/details_harness|winogrande|5_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-12T07-20-29.210830.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_12T07_20_29.210830", "path": ["results_2023-12-12T07-20-29.210830.parquet"]}, {"split": "latest", "path": ["results_2023-12-12T07-20-29.210830.parquet"]}]}]}
2023-12-12T07:24:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of janhq/supermario-slerp-v2 Dataset automatically created during the evaluation run of model janhq/supermario-slerp-v2 on the Open LLM Leaderboard. The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-12T07:20:29.210830(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Evaluation run of janhq/supermario-slerp-v2\n\n\n\nDataset automatically created during the evaluation run of model janhq/supermario-slerp-v2 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T07:20:29.210830(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of janhq/supermario-slerp-v2\n\n\n\nDataset automatically created during the evaluation run of model janhq/supermario-slerp-v2 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T07:20:29.210830(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 187, 67, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of janhq/supermario-slerp-v2\n\n\n\nDataset automatically created during the evaluation run of model janhq/supermario-slerp-v2 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-12T07:20:29.210830(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
91ccafb5229edc75c32d95624d2ca0f1dc6b7f99
# Dataset Card for "Soldering-Data-pix2pix-1209-white-1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ouvic215/Soldering-Data-pix2pix-1209-white-1
[ "region:us" ]
2023-12-12T07:29:06+00:00
{"dataset_info": {"features": [{"name": "mask_image", "dtype": "image"}, {"name": "text", "dtype": "string"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 511555221.25, "num_examples": 6799}], "download_size": 510366317, "dataset_size": 511555221.25}}
2023-12-12T07:34:34+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Soldering-Data-pix2pix-1209-white-1" More Information needed
[ "# Dataset Card for \"Soldering-Data-pix2pix-1209-white-1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Soldering-Data-pix2pix-1209-white-1\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Soldering-Data-pix2pix-1209-white-1\"\n\nMore Information needed" ]
976186687f7a136f218b24dd80d0fbd3d65961f2
converted https://huggingface.co/datasets/argilla/comparison-data-falcon-with-feedback?row=0 to more proper format
umarigan/falcon_feedback_instruction
[ "region:us" ]
2023-12-12T07:29:35+00:00
{"dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "chosen", "dtype": "string"}, {"name": "rejected", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 7942624, "num_examples": 7401}], "download_size": 5146500, "dataset_size": 7942624}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-12-13T06:52:08+00:00
[]
[]
TAGS #region-us
converted URL to more proper format
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
798d38e439aedb3961b9ade6e371eb311fabdab0
# Dataset Card for Evaluation run of xzuyn/GPT-2-SlimOrcaDeduped-airoboros-3.1-MetaMathQA-SFT-124M <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [xzuyn/GPT-2-SlimOrcaDeduped-airoboros-3.1-MetaMathQA-SFT-124M](https://huggingface.co/xzuyn/GPT-2-SlimOrcaDeduped-airoboros-3.1-MetaMathQA-SFT-124M) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_xzuyn__GPT-2-SlimOrcaDeduped-airoboros-3.1-MetaMathQA-SFT-124M", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-12-12T07:34:24.161560](https://huggingface.co/datasets/open-llm-leaderboard/details_xzuyn__GPT-2-SlimOrcaDeduped-airoboros-3.1-MetaMathQA-SFT-124M/blob/main/results_2023-12-12T07-34-24.161560.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.2574568669647554, "acc_stderr": 0.03077370055460631, "acc_norm": 0.258555233951132, "acc_norm_stderr": 0.031529415596877046, "mc1": 0.25091799265605874, "mc1_stderr": 0.015176985027707687, "mc2": 0.38841425251899087, "mc2_stderr": 0.014859033830490542 }, "harness|arc:challenge|25": { "acc": 0.1945392491467577, "acc_stderr": 0.011567709174648728, "acc_norm": 0.24573378839590443, "acc_norm_stderr": 0.012581033453730113 }, "harness|hellaswag|10": { "acc": 0.2842063333997212, "acc_stderr": 0.004501137895230712, "acc_norm": 0.29426409081856203, "acc_norm_stderr": 0.00454779896412667 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.24, "acc_stderr": 0.04292346959909283, "acc_norm": 0.24, "acc_norm_stderr": 0.04292346959909283 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.2518518518518518, "acc_stderr": 0.037498507091740234, "acc_norm": 0.2518518518518518, "acc_norm_stderr": 0.037498507091740234 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.17763157894736842, "acc_stderr": 0.031103182383123398, "acc_norm": 0.17763157894736842, "acc_norm_stderr": 0.031103182383123398 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.22, "acc_stderr": 0.0416333199893227, "acc_norm": 0.22, "acc_norm_stderr": 0.0416333199893227 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.2, "acc_stderr": 0.02461829819586651, "acc_norm": 0.2, "acc_norm_stderr": 0.02461829819586651 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.2569444444444444, "acc_stderr": 0.03653946969442099, "acc_norm": 0.2569444444444444, "acc_norm_stderr": 0.03653946969442099 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.23, "acc_stderr": 0.04229525846816505, "acc_norm": 0.23, "acc_norm_stderr": 0.04229525846816505 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.26, "acc_stderr": 0.0440844002276808, "acc_norm": 0.26, "acc_norm_stderr": 0.0440844002276808 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.29, "acc_stderr": 0.04560480215720684, "acc_norm": 0.29, "acc_norm_stderr": 0.04560480215720684 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.26011560693641617, "acc_stderr": 0.03345036916788992, "acc_norm": 0.26011560693641617, "acc_norm_stderr": 0.03345036916788992 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.23529411764705882, "acc_stderr": 0.04220773659171453, "acc_norm": 0.23529411764705882, "acc_norm_stderr": 0.04220773659171453 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.15, "acc_stderr": 0.035887028128263734, "acc_norm": 0.15, "acc_norm_stderr": 0.035887028128263734 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.2765957446808511, "acc_stderr": 0.0292418838696288, "acc_norm": 0.2765957446808511, "acc_norm_stderr": 0.0292418838696288 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.2719298245614035, "acc_stderr": 0.04185774424022056, "acc_norm": 0.2719298245614035, "acc_norm_stderr": 0.04185774424022056 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.33793103448275863, "acc_stderr": 0.039417076320648906, "acc_norm": 0.33793103448275863, "acc_norm_stderr": 0.039417076320648906 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.24867724867724866, "acc_stderr": 0.02226181769240019, "acc_norm": 0.24867724867724866, "acc_norm_stderr": 0.02226181769240019 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.18253968253968253, "acc_stderr": 0.03455071019102146, "acc_norm": 0.18253968253968253, "acc_norm_stderr": 0.03455071019102146 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.19, "acc_stderr": 0.039427724440366234, "acc_norm": 0.19, "acc_norm_stderr": 0.039427724440366234 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.3064516129032258, "acc_stderr": 0.026226485652553883, "acc_norm": 0.3064516129032258, "acc_norm_stderr": 0.026226485652553883 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.2561576354679803, "acc_stderr": 0.030712730070982592, "acc_norm": 0.2561576354679803, "acc_norm_stderr": 0.030712730070982592 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.3, "acc_stderr": 0.046056618647183814, "acc_norm": 0.3, "acc_norm_stderr": 0.046056618647183814 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.21818181818181817, "acc_stderr": 0.03225078108306289, "acc_norm": 0.21818181818181817, "acc_norm_stderr": 0.03225078108306289 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.3434343434343434, "acc_stderr": 0.033832012232444426, "acc_norm": 0.3434343434343434, "acc_norm_stderr": 0.033832012232444426 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.37823834196891193, "acc_stderr": 0.034998072761933396, "acc_norm": 0.37823834196891193, "acc_norm_stderr": 0.034998072761933396 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.35128205128205126, "acc_stderr": 0.024203665177902803, "acc_norm": 0.35128205128205126, "acc_norm_stderr": 0.024203665177902803 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.2518518518518518, "acc_stderr": 0.02646611753895991, "acc_norm": 0.2518518518518518, "acc_norm_stderr": 0.02646611753895991 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.24369747899159663, "acc_stderr": 0.027886828078380565, "acc_norm": 0.24369747899159663, "acc_norm_stderr": 0.027886828078380565 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.2980132450331126, "acc_stderr": 0.037345356767871984, "acc_norm": 0.2980132450331126, "acc_norm_stderr": 0.037345356767871984 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.3211009174311927, "acc_stderr": 0.020018149772733744, "acc_norm": 0.3211009174311927, "acc_norm_stderr": 0.020018149772733744 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.4675925925925926, "acc_stderr": 0.03402801581358966, "acc_norm": 0.4675925925925926, "acc_norm_stderr": 0.03402801581358966 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.2647058823529412, "acc_stderr": 0.03096451792692341, "acc_norm": 0.2647058823529412, "acc_norm_stderr": 0.03096451792692341 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.2320675105485232, "acc_stderr": 0.027479744550808514, "acc_norm": 0.2320675105485232, "acc_norm_stderr": 0.027479744550808514 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.23766816143497757, "acc_stderr": 0.02856807946471428, "acc_norm": 0.23766816143497757, "acc_norm_stderr": 0.02856807946471428 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.1984732824427481, "acc_stderr": 0.03498149385462472, "acc_norm": 0.1984732824427481, "acc_norm_stderr": 0.03498149385462472 }, "harness|hendrycksTest-international_law|5": { "acc": 0.2892561983471074, "acc_stderr": 0.04139112727635464, "acc_norm": 0.2892561983471074, "acc_norm_stderr": 0.04139112727635464 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.25, "acc_stderr": 0.04186091791394607, "acc_norm": 0.25, "acc_norm_stderr": 0.04186091791394607 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.26993865030674846, "acc_stderr": 0.034878251684978906, "acc_norm": 0.26993865030674846, "acc_norm_stderr": 0.034878251684978906 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.23214285714285715, "acc_stderr": 0.04007341809755806, "acc_norm": 0.23214285714285715, "acc_norm_stderr": 0.04007341809755806 }, "harness|hendrycksTest-management|5": { "acc": 0.23300970873786409, "acc_stderr": 0.041858325989283136, "acc_norm": 0.23300970873786409, "acc_norm_stderr": 0.041858325989283136 }, "harness|hendrycksTest-marketing|5": { "acc": 0.19230769230769232, "acc_stderr": 0.0258192332564837, "acc_norm": 0.19230769230769232, "acc_norm_stderr": 0.0258192332564837 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.3, "acc_stderr": 0.046056618647183814, "acc_norm": 0.3, "acc_norm_stderr": 0.046056618647183814 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.2796934865900383, "acc_stderr": 0.016050792148036546, "acc_norm": 0.2796934865900383, "acc_norm_stderr": 0.016050792148036546 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.23410404624277456, "acc_stderr": 0.02279711027807114, "acc_norm": 0.23410404624277456, "acc_norm_stderr": 0.02279711027807114 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.2424581005586592, "acc_stderr": 0.014333522059217889, "acc_norm": 0.2424581005586592, "acc_norm_stderr": 0.014333522059217889 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.24183006535947713, "acc_stderr": 0.024518195641879334, "acc_norm": 0.24183006535947713, "acc_norm_stderr": 0.024518195641879334 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.26366559485530544, "acc_stderr": 0.02502553850053234, "acc_norm": 0.26366559485530544, "acc_norm_stderr": 0.02502553850053234 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.2222222222222222, "acc_stderr": 0.023132376234543332, "acc_norm": 0.2222222222222222, "acc_norm_stderr": 0.023132376234543332 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.2801418439716312, "acc_stderr": 0.026789172351140245, "acc_norm": 0.2801418439716312, "acc_norm_stderr": 0.026789172351140245 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.23663624511082137, "acc_stderr": 0.010855137351572735, "acc_norm": 0.23663624511082137, "acc_norm_stderr": 0.010855137351572735 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.4338235294117647, "acc_stderr": 0.030105636570016643, "acc_norm": 0.4338235294117647, "acc_norm_stderr": 0.030105636570016643 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.2647058823529412, "acc_stderr": 0.017848089574913226, "acc_norm": 0.2647058823529412, "acc_norm_stderr": 0.017848089574913226 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.17272727272727273, "acc_stderr": 0.0362069183392922, "acc_norm": 0.17272727272727273, "acc_norm_stderr": 0.0362069183392922 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.2163265306122449, "acc_stderr": 0.026358916334904035, "acc_norm": 0.2163265306122449, "acc_norm_stderr": 0.026358916334904035 }, "harness|hendrycksTest-sociology|5": { "acc": 0.23880597014925373, "acc_stderr": 0.030147775935409217, "acc_norm": 0.23880597014925373, "acc_norm_stderr": 0.030147775935409217 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.24, "acc_stderr": 0.04292346959909281, "acc_norm": 0.24, "acc_norm_stderr": 0.04292346959909281 }, "harness|hendrycksTest-virology|5": { "acc": 0.1927710843373494, "acc_stderr": 0.03070982405056527, "acc_norm": 0.1927710843373494, "acc_norm_stderr": 0.03070982405056527 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.23976608187134502, "acc_stderr": 0.03274485211946956, "acc_norm": 0.23976608187134502, "acc_norm_stderr": 0.03274485211946956 }, "harness|truthfulqa:mc|0": { "mc1": 0.25091799265605874, "mc1_stderr": 0.015176985027707687, "mc2": 0.38841425251899087, "mc2_stderr": 0.014859033830490542 }, "harness|winogrande|5": { "acc": 0.49013417521704816, "acc_stderr": 0.014049749833367589 }, "harness|gsm8k|5": { "acc": 0.02122820318423048, "acc_stderr": 0.003970449129848635 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
open-llm-leaderboard/details_xzuyn__GPT-2-SlimOrcaDeduped-airoboros-3.1-MetaMathQA-SFT-124M
[ "region:us" ]
2023-12-12T07:36:01+00:00
{"pretty_name": "Evaluation run of xzuyn/GPT-2-SlimOrcaDeduped-airoboros-3.1-MetaMathQA-SFT-124M", "dataset_summary": "Dataset automatically created during the evaluation run of model [xzuyn/GPT-2-SlimOrcaDeduped-airoboros-3.1-MetaMathQA-SFT-124M](https://huggingface.co/xzuyn/GPT-2-SlimOrcaDeduped-airoboros-3.1-MetaMathQA-SFT-124M) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_xzuyn__GPT-2-SlimOrcaDeduped-airoboros-3.1-MetaMathQA-SFT-124M\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-12-12T07:34:24.161560](https://huggingface.co/datasets/open-llm-leaderboard/details_xzuyn__GPT-2-SlimOrcaDeduped-airoboros-3.1-MetaMathQA-SFT-124M/blob/main/results_2023-12-12T07-34-24.161560.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.2574568669647554,\n \"acc_stderr\": 0.03077370055460631,\n \"acc_norm\": 0.258555233951132,\n \"acc_norm_stderr\": 0.031529415596877046,\n \"mc1\": 0.25091799265605874,\n \"mc1_stderr\": 0.015176985027707687,\n \"mc2\": 0.38841425251899087,\n \"mc2_stderr\": 0.014859033830490542\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.1945392491467577,\n \"acc_stderr\": 0.011567709174648728,\n \"acc_norm\": 0.24573378839590443,\n \"acc_norm_stderr\": 0.012581033453730113\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.2842063333997212,\n \"acc_stderr\": 0.004501137895230712,\n \"acc_norm\": 0.29426409081856203,\n \"acc_norm_stderr\": 0.00454779896412667\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.24,\n \"acc_stderr\": 0.04292346959909283,\n \"acc_norm\": 0.24,\n \"acc_norm_stderr\": 0.04292346959909283\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.2518518518518518,\n \"acc_stderr\": 0.037498507091740234,\n \"acc_norm\": 0.2518518518518518,\n \"acc_norm_stderr\": 0.037498507091740234\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.17763157894736842,\n \"acc_stderr\": 0.031103182383123398,\n \"acc_norm\": 0.17763157894736842,\n \"acc_norm_stderr\": 0.031103182383123398\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.22,\n \"acc_stderr\": 0.0416333199893227,\n \"acc_norm\": 0.22,\n \"acc_norm_stderr\": 0.0416333199893227\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.2,\n \"acc_stderr\": 0.02461829819586651,\n \"acc_norm\": 0.2,\n \"acc_norm_stderr\": 0.02461829819586651\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.2569444444444444,\n \"acc_stderr\": 0.03653946969442099,\n \"acc_norm\": 0.2569444444444444,\n \"acc_norm_stderr\": 0.03653946969442099\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.23,\n \"acc_stderr\": 0.04229525846816505,\n \"acc_norm\": 0.23,\n \"acc_norm_stderr\": 0.04229525846816505\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.26,\n \"acc_stderr\": 0.0440844002276808,\n \"acc_norm\": 0.26,\n \"acc_norm_stderr\": 0.0440844002276808\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.29,\n \"acc_stderr\": 0.04560480215720684,\n \"acc_norm\": 0.29,\n \"acc_norm_stderr\": 0.04560480215720684\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.26011560693641617,\n \"acc_stderr\": 0.03345036916788992,\n \"acc_norm\": 0.26011560693641617,\n \"acc_norm_stderr\": 0.03345036916788992\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.23529411764705882,\n \"acc_stderr\": 0.04220773659171453,\n \"acc_norm\": 0.23529411764705882,\n \"acc_norm_stderr\": 0.04220773659171453\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.15,\n \"acc_stderr\": 0.035887028128263734,\n \"acc_norm\": 0.15,\n \"acc_norm_stderr\": 0.035887028128263734\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.2765957446808511,\n \"acc_stderr\": 0.0292418838696288,\n \"acc_norm\": 0.2765957446808511,\n \"acc_norm_stderr\": 0.0292418838696288\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.2719298245614035,\n \"acc_stderr\": 0.04185774424022056,\n \"acc_norm\": 0.2719298245614035,\n \"acc_norm_stderr\": 0.04185774424022056\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.33793103448275863,\n \"acc_stderr\": 0.039417076320648906,\n \"acc_norm\": 0.33793103448275863,\n \"acc_norm_stderr\": 0.039417076320648906\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.24867724867724866,\n \"acc_stderr\": 0.02226181769240019,\n \"acc_norm\": 0.24867724867724866,\n \"acc_norm_stderr\": 0.02226181769240019\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.18253968253968253,\n \"acc_stderr\": 0.03455071019102146,\n \"acc_norm\": 0.18253968253968253,\n \"acc_norm_stderr\": 0.03455071019102146\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.19,\n \"acc_stderr\": 0.039427724440366234,\n \"acc_norm\": 0.19,\n \"acc_norm_stderr\": 0.039427724440366234\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.3064516129032258,\n \"acc_stderr\": 0.026226485652553883,\n \"acc_norm\": 0.3064516129032258,\n \"acc_norm_stderr\": 0.026226485652553883\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.2561576354679803,\n \"acc_stderr\": 0.030712730070982592,\n \"acc_norm\": 0.2561576354679803,\n \"acc_norm_stderr\": 0.030712730070982592\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.3,\n \"acc_stderr\": 0.046056618647183814,\n \"acc_norm\": 0.3,\n \"acc_norm_stderr\": 0.046056618647183814\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.21818181818181817,\n \"acc_stderr\": 0.03225078108306289,\n \"acc_norm\": 0.21818181818181817,\n \"acc_norm_stderr\": 0.03225078108306289\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.3434343434343434,\n \"acc_stderr\": 0.033832012232444426,\n \"acc_norm\": 0.3434343434343434,\n \"acc_norm_stderr\": 0.033832012232444426\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.37823834196891193,\n \"acc_stderr\": 0.034998072761933396,\n \"acc_norm\": 0.37823834196891193,\n \"acc_norm_stderr\": 0.034998072761933396\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.35128205128205126,\n \"acc_stderr\": 0.024203665177902803,\n \"acc_norm\": 0.35128205128205126,\n \"acc_norm_stderr\": 0.024203665177902803\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.2518518518518518,\n \"acc_stderr\": 0.02646611753895991,\n \"acc_norm\": 0.2518518518518518,\n \"acc_norm_stderr\": 0.02646611753895991\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.24369747899159663,\n \"acc_stderr\": 0.027886828078380565,\n \"acc_norm\": 0.24369747899159663,\n \"acc_norm_stderr\": 0.027886828078380565\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.2980132450331126,\n \"acc_stderr\": 0.037345356767871984,\n \"acc_norm\": 0.2980132450331126,\n \"acc_norm_stderr\": 0.037345356767871984\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.3211009174311927,\n \"acc_stderr\": 0.020018149772733744,\n \"acc_norm\": 0.3211009174311927,\n \"acc_norm_stderr\": 0.020018149772733744\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.4675925925925926,\n \"acc_stderr\": 0.03402801581358966,\n \"acc_norm\": 0.4675925925925926,\n \"acc_norm_stderr\": 0.03402801581358966\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.2647058823529412,\n \"acc_stderr\": 0.03096451792692341,\n \"acc_norm\": 0.2647058823529412,\n \"acc_norm_stderr\": 0.03096451792692341\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.2320675105485232,\n \"acc_stderr\": 0.027479744550808514,\n \"acc_norm\": 0.2320675105485232,\n \"acc_norm_stderr\": 0.027479744550808514\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.23766816143497757,\n \"acc_stderr\": 0.02856807946471428,\n \"acc_norm\": 0.23766816143497757,\n \"acc_norm_stderr\": 0.02856807946471428\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.1984732824427481,\n \"acc_stderr\": 0.03498149385462472,\n \"acc_norm\": 0.1984732824427481,\n \"acc_norm_stderr\": 0.03498149385462472\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.2892561983471074,\n \"acc_stderr\": 0.04139112727635464,\n \"acc_norm\": 0.2892561983471074,\n \"acc_norm_stderr\": 0.04139112727635464\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.25,\n \"acc_stderr\": 0.04186091791394607,\n \"acc_norm\": 0.25,\n \"acc_norm_stderr\": 0.04186091791394607\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.26993865030674846,\n \"acc_stderr\": 0.034878251684978906,\n \"acc_norm\": 0.26993865030674846,\n \"acc_norm_stderr\": 0.034878251684978906\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.23214285714285715,\n \"acc_stderr\": 0.04007341809755806,\n \"acc_norm\": 0.23214285714285715,\n \"acc_norm_stderr\": 0.04007341809755806\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.23300970873786409,\n \"acc_stderr\": 0.041858325989283136,\n \"acc_norm\": 0.23300970873786409,\n \"acc_norm_stderr\": 0.041858325989283136\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.19230769230769232,\n \"acc_stderr\": 0.0258192332564837,\n \"acc_norm\": 0.19230769230769232,\n \"acc_norm_stderr\": 0.0258192332564837\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.3,\n \"acc_stderr\": 0.046056618647183814,\n \"acc_norm\": 0.3,\n \"acc_norm_stderr\": 0.046056618647183814\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.2796934865900383,\n \"acc_stderr\": 0.016050792148036546,\n \"acc_norm\": 0.2796934865900383,\n \"acc_norm_stderr\": 0.016050792148036546\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.23410404624277456,\n \"acc_stderr\": 0.02279711027807114,\n \"acc_norm\": 0.23410404624277456,\n \"acc_norm_stderr\": 0.02279711027807114\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.2424581005586592,\n \"acc_stderr\": 0.014333522059217889,\n \"acc_norm\": 0.2424581005586592,\n \"acc_norm_stderr\": 0.014333522059217889\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.24183006535947713,\n \"acc_stderr\": 0.024518195641879334,\n \"acc_norm\": 0.24183006535947713,\n \"acc_norm_stderr\": 0.024518195641879334\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.26366559485530544,\n \"acc_stderr\": 0.02502553850053234,\n \"acc_norm\": 0.26366559485530544,\n \"acc_norm_stderr\": 0.02502553850053234\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.2222222222222222,\n \"acc_stderr\": 0.023132376234543332,\n \"acc_norm\": 0.2222222222222222,\n \"acc_norm_stderr\": 0.023132376234543332\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.2801418439716312,\n \"acc_stderr\": 0.026789172351140245,\n \"acc_norm\": 0.2801418439716312,\n \"acc_norm_stderr\": 0.026789172351140245\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.23663624511082137,\n \"acc_stderr\": 0.010855137351572735,\n \"acc_norm\": 0.23663624511082137,\n \"acc_norm_stderr\": 0.010855137351572735\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.4338235294117647,\n \"acc_stderr\": 0.030105636570016643,\n \"acc_norm\": 0.4338235294117647,\n \"acc_norm_stderr\": 0.030105636570016643\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.2647058823529412,\n \"acc_stderr\": 0.017848089574913226,\n \"acc_norm\": 0.2647058823529412,\n \"acc_norm_stderr\": 0.017848089574913226\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.17272727272727273,\n \"acc_stderr\": 0.0362069183392922,\n \"acc_norm\": 0.17272727272727273,\n \"acc_norm_stderr\": 0.0362069183392922\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.2163265306122449,\n \"acc_stderr\": 0.026358916334904035,\n \"acc_norm\": 0.2163265306122449,\n \"acc_norm_stderr\": 0.026358916334904035\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.23880597014925373,\n \"acc_stderr\": 0.030147775935409217,\n \"acc_norm\": 0.23880597014925373,\n \"acc_norm_stderr\": 0.030147775935409217\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.24,\n \"acc_stderr\": 0.04292346959909281,\n \"acc_norm\": 0.24,\n \"acc_norm_stderr\": 0.04292346959909281\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.1927710843373494,\n \"acc_stderr\": 0.03070982405056527,\n \"acc_norm\": 0.1927710843373494,\n \"acc_norm_stderr\": 0.03070982405056527\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.23976608187134502,\n \"acc_stderr\": 0.03274485211946956,\n \"acc_norm\": 0.23976608187134502,\n \"acc_norm_stderr\": 0.03274485211946956\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.25091799265605874,\n \"mc1_stderr\": 0.015176985027707687,\n \"mc2\": 0.38841425251899087,\n \"mc2_stderr\": 0.014859033830490542\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.49013417521704816,\n \"acc_stderr\": 0.014049749833367589\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.02122820318423048,\n \"acc_stderr\": 0.003970449129848635\n }\n}\n```", "repo_url": "https://huggingface.co/xzuyn/GPT-2-SlimOrcaDeduped-airoboros-3.1-MetaMathQA-SFT-124M", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|arc:challenge|25_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|gsm8k|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hellaswag|10_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-12T07-34-24.161560.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["**/details_harness|winogrande|5_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-12-12T07-34-24.161560.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_12_12T07_34_24.161560", "path": ["results_2023-12-12T07-34-24.161560.parquet"]}, {"split": "latest", "path": ["results_2023-12-12T07-34-24.161560.parquet"]}]}]}
2023-12-12T07:36:43+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of xzuyn/GPT-2-SlimOrcaDeduped-airoboros-3.1-MetaMathQA-SFT-124M Dataset automatically created during the evaluation run of model xzuyn/GPT-2-SlimOrcaDeduped-airoboros-3.1-MetaMathQA-SFT-124M on the Open LLM Leaderboard. The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-12-12T07:34:24.161560(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Evaluation run of xzuyn/GPT-2-SlimOrcaDeduped-airoboros-3.1-MetaMathQA-SFT-124M\n\n\n\nDataset automatically created during the evaluation run of model xzuyn/GPT-2-SlimOrcaDeduped-airoboros-3.1-MetaMathQA-SFT-124M on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T07:34:24.161560(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of xzuyn/GPT-2-SlimOrcaDeduped-airoboros-3.1-MetaMathQA-SFT-124M\n\n\n\nDataset automatically created during the evaluation run of model xzuyn/GPT-2-SlimOrcaDeduped-airoboros-3.1-MetaMathQA-SFT-124M on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-12-12T07:34:24.161560(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 6, 225, 66, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of xzuyn/GPT-2-SlimOrcaDeduped-airoboros-3.1-MetaMathQA-SFT-124M\n\n\n\nDataset automatically created during the evaluation run of model xzuyn/GPT-2-SlimOrcaDeduped-airoboros-3.1-MetaMathQA-SFT-124M on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-12-12T07:34:24.161560(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations" ]
b20a3a95a05b0273fc49417e3afcfe599bd3e8f1
# Dataset Card for "ParlaSpeech-HR" Mirror of http://hdl.handle.net/11356/1494 . The ParlaSpeech-HR dataset is built from parliamentary proceedings available in the Croatian part of the ParlaMint corpus and the parliamentary recordings available from the Croatian Parliament's YouTube channel. The corpus consists of segments 8-20 seconds in length. There are two transcripts available: the original one, and the one normalised via a simple rule-based normaliser. Each of the transcripts contains word-level alignments to the recordings. Each segment has a reference to the ParlaMint 2.1 corpus (http://hdl.handle.net/11356/1432) via utterance IDs. If a segment is based on a single utterance, speaker information for that segment is available as well. There is speaker information available for 381,849 segments, i.e., 95% of all segments. Speaker information consists of all the speaker information available from the ParlaMint 2.1 corpus (name, party, gender, age, status, role). There are all together 309 speakers in the dataset. The dataset is divided into a training, a development, and a testing subset. Development data consist of 500 segments coming from the 5 most frequent speakers, with the goal of not losing speaker variety on dev data. Test data consist of 513 segments that come from 3 male (258 segments) and 3 female speakers (255 segments). There are no segments coming from the 6 test speakers in the two remaining subsets. The 22,076 instances not having speaker information are not assigned to any of the three subsets. The remaining 380,836 instances form the training set. Please cite as ``` @misc{11356/1494, title = {{ASR} training dataset for Croatian {ParlaSpeech}-{HR} v1.0}, author = {Ljube{\v s}i{\'c}, Nikola and Kor{\v z}inek, Danijel and Rupnik, Peter and Jazbec, Ivo-Pavao and Batanovi{\'c}, Vuk and Baj{\v c}eti{\'c}, Lenka and Evkoski, Bojan}, url = {http://hdl.handle.net/11356/1494}, note = {Slovenian language resource repository {CLARIN}.{SI}}, copyright = {Creative Commons - Attribution-{ShareAlike} 4.0 International ({CC} {BY}-{SA} 4.0)}, issn = {2820-4042}, year = {2022} } ```
5roop/ParlaSpeech-HR
[ "region:us" ]
2023-12-12T07:38:51+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}, {"split": "dev", "path": "data/dev-*"}]}], "dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "transcript", "dtype": "string"}, {"name": "norm_transcript", "dtype": "string"}, {"name": "utterance_id_start", "dtype": "string"}, {"name": "utterance_id_end", "dtype": "string"}, {"name": "split", "dtype": "string"}, {"name": "Speaker_role", "dtype": "string"}, {"name": "Speaker_type", "dtype": "string"}, {"name": "Speaker_party", "dtype": "string"}, {"name": "Speaker_party_name", "dtype": "string"}, {"name": "Party_status", "dtype": "string"}, {"name": "Speaker_name", "dtype": "string"}, {"name": "Speaker_gender", "dtype": "string"}, {"name": "Speaker_birth", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 131437814281.216, "num_examples": 402912}, {"name": "test", "num_bytes": 173387757.0, "num_examples": 513}, {"name": "dev", "num_bytes": 163546294.0, "num_examples": 500}], "download_size": 125983454130, "dataset_size": 131774748332.216}}
2023-12-12T14:16:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ParlaSpeech-HR" Mirror of URL . The ParlaSpeech-HR dataset is built from parliamentary proceedings available in the Croatian part of the ParlaMint corpus and the parliamentary recordings available from the Croatian Parliament's YouTube channel. The corpus consists of segments 8-20 seconds in length. There are two transcripts available: the original one, and the one normalised via a simple rule-based normaliser. Each of the transcripts contains word-level alignments to the recordings. Each segment has a reference to the ParlaMint 2.1 corpus (URL via utterance IDs. If a segment is based on a single utterance, speaker information for that segment is available as well. There is speaker information available for 381,849 segments, i.e., 95% of all segments. Speaker information consists of all the speaker information available from the ParlaMint 2.1 corpus (name, party, gender, age, status, role). There are all together 309 speakers in the dataset. The dataset is divided into a training, a development, and a testing subset. Development data consist of 500 segments coming from the 5 most frequent speakers, with the goal of not losing speaker variety on dev data. Test data consist of 513 segments that come from 3 male (258 segments) and 3 female speakers (255 segments). There are no segments coming from the 6 test speakers in the two remaining subsets. The 22,076 instances not having speaker information are not assigned to any of the three subsets. The remaining 380,836 instances form the training set. Please cite as
[ "# Dataset Card for \"ParlaSpeech-HR\"\n\nMirror of URL .\n\nThe ParlaSpeech-HR dataset is built from parliamentary proceedings available in the Croatian part of the ParlaMint corpus and \nthe parliamentary recordings available from the Croatian Parliament's YouTube channel. The corpus consists of segments 8-20 seconds \nin length. There are two transcripts available: the original one, and the one normalised via a simple rule-based normaliser. Each of the\ntranscripts contains word-level alignments to the recordings. Each segment has a reference to the ParlaMint 2.1 corpus \n(URL via utterance IDs. If a segment is based on a single utterance, \nspeaker information for that segment is available as well. There is speaker information available for \n381,849 segments, i.e., 95% of all segments. \nSpeaker information consists of all the speaker information available from the ParlaMint 2.1 corpus \n(name, party, gender, age, status, role). There are all together 309 speakers in the dataset.\n\nThe dataset is divided into a training, a development, and a testing subset. \nDevelopment data consist of 500 segments coming from the 5 most frequent speakers, with the goal of not losing speaker \nvariety on dev data. Test data consist of 513 segments that come from 3 male (258 segments) and 3 female speakers \n(255 segments). There are no segments coming from the 6 test speakers in the two remaining subsets. The 22,076 instances \nnot having speaker information are not assigned to any of the three subsets. The remaining 380,836 instances form the training set.\n\n\nPlease cite as" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ParlaSpeech-HR\"\n\nMirror of URL .\n\nThe ParlaSpeech-HR dataset is built from parliamentary proceedings available in the Croatian part of the ParlaMint corpus and \nthe parliamentary recordings available from the Croatian Parliament's YouTube channel. The corpus consists of segments 8-20 seconds \nin length. There are two transcripts available: the original one, and the one normalised via a simple rule-based normaliser. Each of the\ntranscripts contains word-level alignments to the recordings. Each segment has a reference to the ParlaMint 2.1 corpus \n(URL via utterance IDs. If a segment is based on a single utterance, \nspeaker information for that segment is available as well. There is speaker information available for \n381,849 segments, i.e., 95% of all segments. \nSpeaker information consists of all the speaker information available from the ParlaMint 2.1 corpus \n(name, party, gender, age, status, role). There are all together 309 speakers in the dataset.\n\nThe dataset is divided into a training, a development, and a testing subset. \nDevelopment data consist of 500 segments coming from the 5 most frequent speakers, with the goal of not losing speaker \nvariety on dev data. Test data consist of 513 segments that come from 3 male (258 segments) and 3 female speakers \n(255 segments). There are no segments coming from the 6 test speakers in the two remaining subsets. The 22,076 instances \nnot having speaker information are not assigned to any of the three subsets. The remaining 380,836 instances form the training set.\n\n\nPlease cite as" ]
[ 6, 376 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ParlaSpeech-HR\"\n\nMirror of URL .\n\nThe ParlaSpeech-HR dataset is built from parliamentary proceedings available in the Croatian part of the ParlaMint corpus and \nthe parliamentary recordings available from the Croatian Parliament's YouTube channel. The corpus consists of segments 8-20 seconds \nin length. There are two transcripts available: the original one, and the one normalised via a simple rule-based normaliser. Each of the\ntranscripts contains word-level alignments to the recordings. Each segment has a reference to the ParlaMint 2.1 corpus \n(URL via utterance IDs. If a segment is based on a single utterance, \nspeaker information for that segment is available as well. There is speaker information available for \n381,849 segments, i.e., 95% of all segments. \nSpeaker information consists of all the speaker information available from the ParlaMint 2.1 corpus \n(name, party, gender, age, status, role). There are all together 309 speakers in the dataset.\n\nThe dataset is divided into a training, a development, and a testing subset. \nDevelopment data consist of 500 segments coming from the 5 most frequent speakers, with the goal of not losing speaker \nvariety on dev data. Test data consist of 513 segments that come from 3 male (258 segments) and 3 female speakers \n(255 segments). There are no segments coming from the 6 test speakers in the two remaining subsets. The 22,076 instances \nnot having speaker information are not assigned to any of the three subsets. The remaining 380,836 instances form the training set.\n\n\nPlease cite as" ]
a3a4f89b03173c106e01b1abbe546f8f8ab14437
# Dataset Card for "Soldering-Data-pix2pix-1209-white-2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ouvic215/Soldering-Data-pix2pix-1209-white-2
[ "region:us" ]
2023-12-12T07:40:53+00:00
{"dataset_info": {"features": [{"name": "mask_image", "dtype": "image"}, {"name": "text", "dtype": "string"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 2087059454.5, "num_examples": 25950}], "download_size": 1724200255, "dataset_size": 2087059454.5}}
2023-12-12T07:44:45+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Soldering-Data-pix2pix-1209-white-2" More Information needed
[ "# Dataset Card for \"Soldering-Data-pix2pix-1209-white-2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Soldering-Data-pix2pix-1209-white-2\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Soldering-Data-pix2pix-1209-white-2\"\n\nMore Information needed" ]
41a05ec50f517931deb208c2e9d1f7a476385ef9
# The Effanie Dataset ![Logo](./effanie-logo-wordmark.png) This is the dataset for Effanie, the persuasive, confident, and helpful AI! There are some helpful files for creating the dataset yourself. These include: * [XLSM Conversion tool](./convertXLSM.py) * [Parquet Conversion tool](./convertParquet.py) * [The actual XLSM](./train.xlsm) This is based off of the [OpenOrca dataset.](https://huggingface.co/datasets/Open-Orca/OpenOrca)
josiauhlol/effanie-AI
[ "task_categories:question-answering", "task_categories:conversational", "language:en", "license:mit", "effanie", "chat", "region:us" ]
2023-12-12T08:01:00+00:00
{"language": ["en"], "license": "mit", "task_categories": ["question-answering", "conversational"], "tags": ["effanie", "chat"]}
2023-12-14T09:02:10+00:00
[]
[ "en" ]
TAGS #task_categories-question-answering #task_categories-conversational #language-English #license-mit #effanie #chat #region-us
# The Effanie Dataset !Logo This is the dataset for Effanie, the persuasive, confident, and helpful AI! There are some helpful files for creating the dataset yourself. These include: * XLSM Conversion tool * Parquet Conversion tool * The actual XLSM This is based off of the OpenOrca dataset.
[ "# The Effanie Dataset\n!Logo\n\nThis is the dataset for Effanie, the persuasive, confident, and helpful AI!\n\nThere are some helpful files for creating the dataset yourself. These include:\n* XLSM Conversion tool\n* Parquet Conversion tool\n* The actual XLSM\n\nThis is based off of the OpenOrca dataset." ]
[ "TAGS\n#task_categories-question-answering #task_categories-conversational #language-English #license-mit #effanie #chat #region-us \n", "# The Effanie Dataset\n!Logo\n\nThis is the dataset for Effanie, the persuasive, confident, and helpful AI!\n\nThere are some helpful files for creating the dataset yourself. These include:\n* XLSM Conversion tool\n* Parquet Conversion tool\n* The actual XLSM\n\nThis is based off of the OpenOrca dataset." ]
[ 42, 74 ]
[ "passage: TAGS\n#task_categories-question-answering #task_categories-conversational #language-English #license-mit #effanie #chat #region-us \n# The Effanie Dataset\n!Logo\n\nThis is the dataset for Effanie, the persuasive, confident, and helpful AI!\n\nThere are some helpful files for creating the dataset yourself. These include:\n* XLSM Conversion tool\n* Parquet Conversion tool\n* The actual XLSM\n\nThis is based off of the OpenOrca dataset." ]
10c59b948f6d6a69a7568a78c7429a421734ab3d
# Vietnamese Legal Closed QA dataset The dataset can be used for RAG Chatbot. It contains 160K samples where each sample in the dataset includes a question, K relevant contexts and an answer generated by OpenAI GPT-4. [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
thanhdath/vietnamese_legal_closed_qa
[ "region:us" ]
2023-12-12T08:21:47+00:00
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "selected_contexts", "sequence": "string"}, {"name": "answer", "dtype": "string"}, {"name": "provider", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 974327420, "num_examples": 171278}], "download_size": 346818235, "dataset_size": 974327420}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-12-20T09:57:59+00:00
[]
[]
TAGS #region-us
# Vietnamese Legal Closed QA dataset The dataset can be used for RAG Chatbot. It contains 160K samples where each sample in the dataset includes a question, K relevant contexts and an answer generated by OpenAI GPT-4. More Information needed
[ "# Vietnamese Legal Closed QA dataset\n\nThe dataset can be used for RAG Chatbot. It contains 160K samples where each sample in the dataset includes a question, K relevant contexts and an answer generated by OpenAI GPT-4.\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Vietnamese Legal Closed QA dataset\n\nThe dataset can be used for RAG Chatbot. It contains 160K samples where each sample in the dataset includes a question, K relevant contexts and an answer generated by OpenAI GPT-4.\n\nMore Information needed" ]
[ 6, 59 ]
[ "passage: TAGS\n#region-us \n# Vietnamese Legal Closed QA dataset\n\nThe dataset can be used for RAG Chatbot. It contains 160K samples where each sample in the dataset includes a question, K relevant contexts and an answer generated by OpenAI GPT-4.\n\nMore Information needed" ]
9cc086723aa21f2e61cf77dd0a37435e444b5595
# Crawl Youtube We crawled Malaysian and Singaporean youtube channels, total up to 60k audio files with total 185k hours. URLs data at https://github.com/mesolitica/malaya-speech/tree/master/data/youtube/data Notebooks at https://github.com/mesolitica/malaya-speech/tree/master/data/youtube ## How to load the data efficiently? ```python import pandas as pd import json from datasets import Audio from torch.utils.data import DataLoader, Dataset chunks = 30 sr = 16000 class Train(Dataset): def __init__(self, indices, maxlen_cache_df=5, maxlen_cache_audio=50): self.indices = {} for k, v in indices.items(): for i in range(int(k), v['start'] + v['end'], 1): self.indices[i] = v self.max_index = len(self.indices) self.cache_df = {} self.cache_audio = {} self.maxlen_cache_df = maxlen_cache_df self.maxlen_cache_audio = maxlen_cache_audio self.audio = Audio(sampling_rate=16000) def __len__(self): return self.max_index def __getitem__(self, item): if item < 0: item = self.max_index + item v = self.indices[item] key_row = f"{v['filename']}-{v['i']}" chunk_index = item - v['start'] if key_row not in self.cache_audio: if v['filename'] not in self.cache_df: df = pd.read_parquet(v['filename']) if len(self.cache_df) >= self.maxlen_cache_df: keys = list(self.cache_df.keys()) self.cache_df.pop(sorted(keys)[0], None) self.cache_df[v['filename']] = df else: df = self.cache_df[v['filename']] row = df.iloc[int(v['i'])] audio = self.audio.decode_example(self.audio.encode_example(row['filename'])) if len(self.cache_audio) >= self.maxlen_cache_audio: keys = list(self.cache_audio.keys()) self.cache_audio.pop(sorted(keys)[0], None) self.cache_audio[key_row] = audio else: audio = self.cache_audio[key_row] return { 'array': audio['array'][(chunks * sr) * chunk_index: (chunks * sr) * (chunk_index + 1)] } with open('crawl-youtube-global-indices.json') as fopen: global_indices = json.load(fopen) train = Train(global_indices) train[0] ``` ``` {'array': array([ 0. , 0. , 0. , ..., -0.00845753, 0.00168016, -0.00606468])} ``` This is global hashing indices if the audio chunked with 30 seconds, read more at https://github.com/mesolitica/malaya-speech/tree/malaysian-speech/data/semisupervised/pseudolabel-whisper ## Licensing ``` All the videos, songs, images, and graphics used in the video belong to their respective owners and I does not claim any right over them. Copyright Disclaimer under section 107 of the Copyright Act of 1976, allowance is made for "fair use" for purposes such as criticism, comment, news reporting, teaching, scholarship, education and research. Fair use is a use permitted by copyright statute that might otherwise be infringing. ```
malaysia-ai/crawl-youtube
[ "region:us" ]
2023-12-12T08:24:27+00:00
{"dataset_info": {"features": [{"name": "filename", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "url", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1068464089483.938, "num_examples": 59879}], "download_size": 16395869337, "dataset_size": 1068464089483.938}}
2023-12-23T23:17:32+00:00
[]
[]
TAGS #region-us
# Crawl Youtube We crawled Malaysian and Singaporean youtube channels, total up to 60k audio files with total 185k hours. URLs data at URL Notebooks at URL ## How to load the data efficiently? This is global hashing indices if the audio chunked with 30 seconds, read more at URL ## Licensing
[ "# Crawl Youtube\n\nWe crawled Malaysian and Singaporean youtube channels, total up to 60k audio files with total 185k hours.\n\nURLs data at URL\n\nNotebooks at URL", "## How to load the data efficiently?\n\n\n\n\n\nThis is global hashing indices if the audio chunked with 30 seconds, read more at URL", "## Licensing" ]
[ "TAGS\n#region-us \n", "# Crawl Youtube\n\nWe crawled Malaysian and Singaporean youtube channels, total up to 60k audio files with total 185k hours.\n\nURLs data at URL\n\nNotebooks at URL", "## How to load the data efficiently?\n\n\n\n\n\nThis is global hashing indices if the audio chunked with 30 seconds, read more at URL", "## Licensing" ]
[ 6, 38, 29, 4 ]
[ "passage: TAGS\n#region-us \n# Crawl Youtube\n\nWe crawled Malaysian and Singaporean youtube channels, total up to 60k audio files with total 185k hours.\n\nURLs data at URL\n\nNotebooks at URL## How to load the data efficiently?\n\n\n\n\n\nThis is global hashing indices if the audio chunked with 30 seconds, read more at URL## Licensing" ]