sha | text | id | tags | created_at | metadata | last_modified | arxiv | languages | tags_str | text_str | text_lists | processed_texts | tokens_length | input_texts
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
959d4be1f17d67539131a780bf64325a794694ce | # Dataset Card for "Text-summ-llama2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | BhargaviAcharya/Text-summ-llama2 | [
"region:us"
]
| 2023-11-14T21:52:23+00:00 | {"dataset_info": {"features": [{"name": "article", "dtype": "string"}, {"name": "highlights", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "clean_summary", "dtype": "string"}, {"name": "summary_summacConv_scores", "dtype": "float64"}, {"name": "highlight_summacConv_scores", "dtype": "float64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 1577635, "num_examples": 200}], "download_size": 989832, "dataset_size": 1577635}, "configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}]} | 2023-11-14T21:55:05+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "Text-summ-llama2"
More Information needed | [
"# Dataset Card for \"Text-summ-llama2\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"Text-summ-llama2\"\n\nMore Information needed"
]
| [
6,
18
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"Text-summ-llama2\"\n\nMore Information needed"
]
|
0c79c6ae8fdcb6987848591442a1f0fc4f282723 | # Dataset Card for "question_to_sql"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | higgsfield/question_to_sql | [
"region:us"
]
| 2023-11-14T21:54:11+00:00 | {"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "completion", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 19324361, "num_examples": 78577}], "download_size": 6963083, "dataset_size": 19324361}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-14T21:54:15+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "question_to_sql"
More Information needed | [
"# Dataset Card for \"question_to_sql\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"question_to_sql\"\n\nMore Information needed"
]
| [
6,
17
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"question_to_sql\"\n\nMore Information needed"
]
|
1e4367357bfdf3f31dfaff7852516d15fb1c1f77 | # Dataset Card for "financial-customer-complaints-v5"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | aciborowska/customers-complaints | [
"region:us"
]
| 2023-11-14T21:57:27+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "Date_received", "dtype": "string"}, {"name": "Product", "dtype": "string"}, {"name": "Sub_product", "dtype": "string"}, {"name": "Issue", "dtype": "string"}, {"name": "Sub_issue", "dtype": "string"}, {"name": "Consumer_complaint_narrative", "dtype": "string"}, {"name": "Company_public_response", "dtype": "string"}, {"name": "Company", "dtype": "string"}, {"name": "State", "dtype": "string"}, {"name": "ZIP_code", "dtype": "string"}, {"name": "Tags", "dtype": "string"}, {"name": "Consumer_consent_provided?", "dtype": "string"}, {"name": "Submitted_via", "dtype": "string"}, {"name": "Date_sent_to_company", "dtype": "string"}, {"name": "Company response to consumer", "dtype": "string"}, {"name": "Timely_response?", "dtype": "string"}, {"name": "Consumer_disputed?", "dtype": "string"}, {"name": "Complaint_ID", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 40340456, "num_examples": 30000}], "download_size": 15875683, "dataset_size": 40340456}} | 2023-11-14T21:57:41+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "financial-customer-complaints-v5"
More Information needed | [
"# Dataset Card for \"financial-customer-complaints-v5\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"financial-customer-complaints-v5\"\n\nMore Information needed"
]
| [
6,
23
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"financial-customer-complaints-v5\"\n\nMore Information needed"
]
|
a029964845253e6b66025508128220a1a8ce9024 | # Dataset Card for "inference_pairs_clustered"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | gh1407/inference_pairs_clustered | [
"region:us"
]
| 2023-11-14T22:10:19+00:00 | {"dataset_info": {"features": [{"name": "original_string", "dtype": "string"}, {"name": "cause_split", "dtype": "string"}, {"name": "effect_split", "dtype": "string"}, {"name": "cause_summarized", "dtype": "string"}, {"name": "effect_summarized", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1060959, "num_examples": 1302}], "download_size": 490417, "dataset_size": 1060959}} | 2023-11-14T22:22:46+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "inference_pairs_clustered"
More Information needed | [
"# Dataset Card for \"inference_pairs_clustered\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"inference_pairs_clustered\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"inference_pairs_clustered\"\n\nMore Information needed"
]
|
c3ad0b4efbdd12e11cc4faf95aebfbd6f53b664b |
# TOFU: Task of Fictitious Unlearning 🍢
The TOFU dataset serves as a benchmark for evaluating the unlearning performance of large language models on realistic tasks. The dataset comprises question-answer pairs based on the autobiographies of 200 fictitious authors, generated entirely by GPT-4. The goal of the task is to unlearn a fine-tuned model on various fractions of the forget set.
## Quick Links
- [**Website**](https://locuslab.github.io/tofu): The landing page for TOFU
- [**arXiv Paper**](http://arxiv.org/abs/2401.06121): Detailed information about the TOFU dataset and its significance in unlearning tasks.
- [**GitHub Repository**](https://github.com/locuslab/tofu): Access the source code, fine-tuning scripts, and additional resources for the TOFU dataset.
- [**Dataset on Hugging Face**](https://huggingface.co/datasets/locuslab/TOFU): Direct link to download the TOFU dataset.
- [**Leaderboard on Hugging Face Spaces**](https://huggingface.co/spaces/locuslab/tofu_leaderboard): Current rankings and submissions for the TOFU dataset challenges.
- [**Summary on Twitter**](https://x.com/_akhaliq/status/1745643293839327268): A concise summary and key takeaways from the project.
## Applicability 🚀
The dataset is in QA format, making it ideal for use with popular chat models such as Llama2, Mistral, or Qwen. However, it also works with any other large language model. The corresponding code base is written for the Llama2-chat and Phi-1.5 models, but can be easily adapted to other models.
## Loading the Dataset
To load the dataset, use the following code:
```python
from datasets import load_dataset
dataset = load_dataset("locuslab/TOFU", "full")
```
### Available forget sets are:
- `forget01`: Forgetting 1% of the original dataset; all entries correspond to a single author.
- `forget05`: Forgetting 5% of the original dataset; all entries correspond to a single author.
- `forget10`: Forgetting 10% of the original dataset; all entries correspond to a single author.
Retain sets corresponding to each forget set are also available, which can be used to train an Oracle model.
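For example, to pair a forget split with its retain counterpart (configuration names as listed in this card's metadata):
```python
from datasets import load_dataset

# Unlearn on the 1% forget split; the matching 99% retain split
# can be used to train the corresponding Oracle model.
forget_set = load_dataset("locuslab/TOFU", "forget01")
retain_set = load_dataset("locuslab/TOFU", "retain99")
```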
## Codebase
The code for training the models and the availability of all fine-tuned models can be found at our [GitHub repository](https://github.com/locuslab/tofu).
## Citing Our Work
If you find our codebase and dataset beneficial, please cite our work:
```
@misc{tofu2024,
title={TOFU: A Task of Fictitious Unlearning for LLMs},
author={Pratyush Maini and Zhili Feng and Avi Schwarzschild and Zachary C. Lipton and J. Zico Kolter},
year={2024},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
``` | locuslab/TOFU | [
"task_categories:question-answering",
"task_ids:closed-domain-qa",
"annotations_creators:machine-generated",
"language_creators:machine-generated",
"multilinguality:monolingual",
"size_categories:1K<n<10K",
"source_datasets:original",
"language:en",
"license:mit",
"unlearning",
"question answering",
"TOFU",
"NLP",
"LLM",
"arxiv:2401.06121",
"region:us"
]
| 2023-11-14T22:25:09+00:00 | {"annotations_creators": ["machine-generated"], "language_creators": ["machine-generated"], "language": ["en"], "license": "mit", "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": ["original"], "task_categories": ["question-answering"], "task_ids": ["closed-domain-qa"], "pretty_name": "TOFU", "tags": ["unlearning", "question answering", "TOFU", "NLP", "LLM"], "configs": [{"config_name": "full", "data_files": "full.json", "default": true}, {"config_name": "forget01", "data_files": "forget01.json"}, {"config_name": "forget05", "data_files": "forget05.json"}, {"config_name": "forget10", "data_files": "forget10.json"}, {"config_name": "retain90", "data_files": "retain90.json"}, {"config_name": "retain95", "data_files": "retain95.json"}, {"config_name": "retain99", "data_files": "retain99.json"}, {"config_name": "world_facts", "data_files": "world_facts.json"}, {"config_name": "real_authors", "data_files": "real_authors.json"}, {"config_name": "forget01_perturbed", "data_files": "forget01_perturbed.json"}, {"config_name": "forget05_perturbed", "data_files": "forget05_perturbed.json"}, {"config_name": "forget10_perturbed", "data_files": "forget10_perturbed.json"}, {"config_name": "retain_perturbed", "data_files": "retain_perturbed.json"}, {"config_name": "world_facts_perturbed", "data_files": "world_facts_perturbed.json"}, {"config_name": "real_authors_perturbed", "data_files": "real_authors_perturbed.json"}]} | 2024-02-07T14:58:06+00:00 | [
"2401.06121"
]
| [
"en"
]
| TAGS
#task_categories-question-answering #task_ids-closed-domain-qa #annotations_creators-machine-generated #language_creators-machine-generated #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-English #license-mit #unlearning #question answering #TOFU #NLP #LLM #arxiv-2401.06121 #region-us
|
# TOFU: Task of Fictitious Unlearning
The TOFU dataset serves as a benchmark for evaluating the unlearning performance of large language models on realistic tasks. The dataset comprises question-answer pairs based on the autobiographies of 200 fictitious authors, generated entirely by GPT-4. The goal of the task is to unlearn a fine-tuned model on various fractions of the forget set.
## Quick Links
- Website: The landing page for TOFU
- arXiv Paper: Detailed information about the TOFU dataset and its significance in unlearning tasks.
- GitHub Repository: Access the source code, fine-tuning scripts, and additional resources for the TOFU dataset.
- Dataset on Hugging Face: Direct link to download the TOFU dataset.
- Leaderboard on Hugging Face Spaces: Current rankings and submissions for the TOFU dataset challenges.
- Summary on Twitter: A concise summary and key takeaways from the project.
## Applicability
The dataset is in QA format, making it ideal for use with popular chat models such as Llama2, Mistral, or Qwen. However, it also works with any other large language model. The corresponding code base is written for the Llama2-chat and Phi-1.5 models, but can be easily adapted to other models.
## Loading the Dataset
To load the dataset, use the following code:
### Available forget sets are:
- 'forget01': Forgetting 1% of the original dataset; all entries correspond to a single author.
- 'forget05': Forgetting 5% of the original dataset; all entries correspond to a single author.
- 'forget10': Forgetting 10% of the original dataset; all entries correspond to a single author.
Retain sets corresponding to each forget set are also available, which can be used to train an Oracle model.
## Codebase
The code for training the models and the availability of all fine-tuned models can be found at our GitHub repository.
## Citing Our Work
If you find our codebase and dataset beneficial, please cite our work:
| [
"# TOFU: Task of Fictitious Unlearning \n\nThe TOFU dataset serves as a benchmark for evaluating unlearning performance of large language models on realistic tasks. The dataset comprises question-answer pairs based on autobiographies of 200 different authors that do not exist and are completely fictitiously generated by the GPT-4 model. The goal of the task is to unlearn a fine-tuned model on various fractions of the forget set.",
"## Quick Links\n\n- Website: The landing page for TOFU\n- arXiv Paper: Detailed information about the TOFU dataset and its significance in unlearning tasks.\n- GitHub Repository: Access the source code, fine-tuning scripts, and additional resources for the TOFU dataset.\n- Dataset on Hugging Face: Direct link to download the TOFU dataset.\n- Leaderboard on Hugging Face Spaces: Current rankings and submissions for the TOFU dataset challenges.\n- Summary on Twitter: A concise summary and key takeaways from the project.",
"## Applicability \n\nThe dataset is in QA format, making it ideal for use with popular chat models such as Llama2, Mistral, or Qwen. However, it also works for any other large language model. The corresponding code base is written for the Llama2 chat, and Phi-1.5 models, but can be easily adapted to other models.",
"## Loading the Dataset\n\nTo load the dataset, use the following code:",
"### Available forget sets are:\n\n- 'forget01': Forgetting 1% of the original dataset, all entries correspond to a single author.\n- 'forget05': Forgetting 5% of the original dataset, all entries correspond to a single author.\n- 'forget10': Forgetting 10% of the original dataset, all entries correspond to a single author.\n\nRetain sets corresponding to each forget set are also available, which can be used to train an Oracle model.",
"## Codebase\n\nThe code for training the models and the availability of all fine-tuned models can be found at our GitHub repository.",
"## Citing Our Work\n\nIf you find our codebase and dataset beneficial, please cite our work:"
]
| [
"TAGS\n#task_categories-question-answering #task_ids-closed-domain-qa #annotations_creators-machine-generated #language_creators-machine-generated #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-English #license-mit #unlearning #question answering #TOFU #NLP #LLM #arxiv-2401.06121 #region-us \n",
"# TOFU: Task of Fictitious Unlearning \n\nThe TOFU dataset serves as a benchmark for evaluating unlearning performance of large language models on realistic tasks. The dataset comprises question-answer pairs based on autobiographies of 200 different authors that do not exist and are completely fictitiously generated by the GPT-4 model. The goal of the task is to unlearn a fine-tuned model on various fractions of the forget set.",
"## Quick Links\n\n- Website: The landing page for TOFU\n- arXiv Paper: Detailed information about the TOFU dataset and its significance in unlearning tasks.\n- GitHub Repository: Access the source code, fine-tuning scripts, and additional resources for the TOFU dataset.\n- Dataset on Hugging Face: Direct link to download the TOFU dataset.\n- Leaderboard on Hugging Face Spaces: Current rankings and submissions for the TOFU dataset challenges.\n- Summary on Twitter: A concise summary and key takeaways from the project.",
"## Applicability \n\nThe dataset is in QA format, making it ideal for use with popular chat models such as Llama2, Mistral, or Qwen. However, it also works for any other large language model. The corresponding code base is written for the Llama2 chat, and Phi-1.5 models, but can be easily adapted to other models.",
"## Loading the Dataset\n\nTo load the dataset, use the following code:",
"### Available forget sets are:\n\n- 'forget01': Forgetting 1% of the original dataset, all entries correspond to a single author.\n- 'forget05': Forgetting 5% of the original dataset, all entries correspond to a single author.\n- 'forget10': Forgetting 10% of the original dataset, all entries correspond to a single author.\n\nRetain sets corresponding to each forget set are also available, which can be used to train an Oracle model.",
"## Codebase\n\nThe code for training the models and the availability of all fine-tuned models can be found at our GitHub repository.",
"## Citing Our Work\n\nIf you find our codebase and dataset beneficial, please cite our work:"
]
| [
118,
108,
129,
77,
16,
110,
33,
22
]
| [
"passage: TAGS\n#task_categories-question-answering #task_ids-closed-domain-qa #annotations_creators-machine-generated #language_creators-machine-generated #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-English #license-mit #unlearning #question answering #TOFU #NLP #LLM #arxiv-2401.06121 #region-us \n# TOFU: Task of Fictitious Unlearning \n\nThe TOFU dataset serves as a benchmark for evaluating unlearning performance of large language models on realistic tasks. The dataset comprises question-answer pairs based on autobiographies of 200 different authors that do not exist and are completely fictitiously generated by the GPT-4 model. The goal of the task is to unlearn a fine-tuned model on various fractions of the forget set.## Quick Links\n\n- Website: The landing page for TOFU\n- arXiv Paper: Detailed information about the TOFU dataset and its significance in unlearning tasks.\n- GitHub Repository: Access the source code, fine-tuning scripts, and additional resources for the TOFU dataset.\n- Dataset on Hugging Face: Direct link to download the TOFU dataset.\n- Leaderboard on Hugging Face Spaces: Current rankings and submissions for the TOFU dataset challenges.\n- Summary on Twitter: A concise summary and key takeaways from the project.## Applicability \n\nThe dataset is in QA format, making it ideal for use with popular chat models such as Llama2, Mistral, or Qwen. However, it also works for any other large language model. The corresponding code base is written for the Llama2 chat, and Phi-1.5 models, but can be easily adapted to other models.## Loading the Dataset\n\nTo load the dataset, use the following code:"
]
|
185d2217611cb49acd0abf83aecc61dc12a23f40 | # Dataset Card for "school-math-questions"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | higgsfield/school-math-questions | [
"region:us"
]
| 2023-11-14T22:29:15+00:00 | {"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "completion", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4787332, "num_examples": 8792}], "download_size": 2576099, "dataset_size": 4787332}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-14T22:29:19+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "school-math-questions"
More Information needed | [
"# Dataset Card for \"school-math-questions\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"school-math-questions\"\n\nMore Information needed"
]
| [
6,
16
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"school-math-questions\"\n\nMore Information needed"
]
|
ffb1ac176a8647c1867ae418bcdfb06cbf5d3a88 | # ibl-best-practices-instructor-dataset
This dataset contains a selection of various prompts and appropriate responses demonstrating a conversation between a student and a teacher.
Each response is generated to match an associated virtue expected of a professional teacher.
## Dataset Format
Each datapoint has three columns (see the loading sketch after this list):
- Virtue: The corresponding positive virtue we expect a tutor to demonstrate
- Prompt: A question asked by a student to a teacher
- Response: An appropriate answer to the question asked by the student. This answer exhibits the virtue specified.
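A minimal loading sketch with the Hugging Face `datasets` library. The exact column casing and the default `train` split below follow this card and are assumptions about the data files:
```python
from datasets import load_dataset

# Load the dataset; split name and column casing are assumed from this card.
ds = load_dataset("iblai/ibl-best-practices-instructor-dataset", split="train")
row = ds[0]
print(row["Virtue"])    # the positive virtue the response demonstrates
print(row["Prompt"])    # the student's question
print(row["Response"])  # the teacher's virtue-aligned answer
```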
## Dataset Creation
The dataset was generated from a set of documents explaining qualities of a good teacher.
- These documents were fed to an LLM (GPT-3.5) to generate a list of positive virtues that a teacher must demonstrate as specified in the documents
- A second prompt is then made to generate sample conversations (prompt and response pairs) where the response from the teacher clearly demonstrates the virtue specified.
| iblai/ibl-best-practices-instructor-dataset | [
"license:apache-2.0",
"region:us"
]
| 2023-11-14T22:41:15+00:00 | {"license": "apache-2.0"} | 2023-11-14T22:50:28+00:00 | []
| []
| TAGS
#license-apache-2.0 #region-us
| # ibl-best-practices-instructor-dataset
This dataset contains a selection of various prompts and appropriate responses demonstrating a conversation between a student and a teacher.
Each response is generated to match an associated virtue expected of a professional teacher.
## Dataset Format
Each datapoint has three columns:
- Virtue: The corresponding positive virtue we expect a tutor to demonstrate
- Prompt: A question asked by a student to a teacher
- Response: An appropriate answer to the question asked by the student. This answer exhibits the virtue specified.
## Dataset Creation
The dataset was generated from a set of documents explaining qualities of a good teacher.
- These documents were fed to an LLM (GPT-3.5) to generate a list of positive virtues that a teacher must demonstrate as specified in the documents
- A second prompt is then made to generate sample conversations (prompt and response pairs) where the response from the teacher clearly demonstrates the virtue specified.
| [
"# ibl-best-practices-instructor-dataset\n\nThis dataset contains a selection of various prompts and appropriate responses demonstrating a conversation between a student and a teacher.\nEach response is generated to match an associated virtue expected of a professional teacher",
"## Dataset Format\nEach datapoint has three colums \n- Virtue: The corresponding postive virtue we expect a tutor to demonstrate\n- Prompt: A question asked by a student to a teacher\n- Response: An appropriate answer to the question asked by the student. This answer exhibits the virtue specified.",
"## Dataset Creation\nThe dataset was generated from a set of documents explaining qualities of a good teacher.\n- These documents were fed to an llm (GPT-3.5) to generate a list of positive virtues that a teacher must demonstrate as specified in the documents\n- A second prompt is then made to generate sample conversations (prompt and response pairs) where the response from the teacher clearly demonstrates the virtue specified."
]
| [
"TAGS\n#license-apache-2.0 #region-us \n",
"# ibl-best-practices-instructor-dataset\n\nThis dataset contains a selection of various prompts and appropriate responses demonstrating a conversation between a student and a teacher.\nEach response is generated to match an associated virtue expected of a professional teacher",
"## Dataset Format\nEach datapoint has three colums \n- Virtue: The corresponding postive virtue we expect a tutor to demonstrate\n- Prompt: A question asked by a student to a teacher\n- Response: An appropriate answer to the question asked by the student. This answer exhibits the virtue specified.",
"## Dataset Creation\nThe dataset was generated from a set of documents explaining qualities of a good teacher.\n- These documents were fed to an llm (GPT-3.5) to generate a list of positive virtues that a teacher must demonstrate as specified in the documents\n- A second prompt is then made to generate sample conversations (prompt and response pairs) where the response from the teacher clearly demonstrates the virtue specified."
]
| [
14,
58,
67,
96
]
| [
"passage: TAGS\n#license-apache-2.0 #region-us \n# ibl-best-practices-instructor-dataset\n\nThis dataset contains a selection of various prompts and appropriate responses demonstrating a conversation between a student and a teacher.\nEach response is generated to match an associated virtue expected of a professional teacher## Dataset Format\nEach datapoint has three colums \n- Virtue: The corresponding postive virtue we expect a tutor to demonstrate\n- Prompt: A question asked by a student to a teacher\n- Response: An appropriate answer to the question asked by the student. This answer exhibits the virtue specified.## Dataset Creation\nThe dataset was generated from a set of documents explaining qualities of a good teacher.\n- These documents were fed to an llm (GPT-3.5) to generate a list of positive virtues that a teacher must demonstrate as specified in the documents\n- A second prompt is then made to generate sample conversations (prompt and response pairs) where the response from the teacher clearly demonstrates the virtue specified."
]
|
87e171c950f55e087d25b0a8955758d8186bdf75 | # Dataset Card for UNcommonsense
## Dataset Description
- **Paper:** https://arxiv.org/abs/2311.08469
- **Point of Contact:** [Wenting Zhao](mailto:[email protected])
### Dataset Summary
UNcommonsense is an abductive reasoning dataset. Unlike [aNLG](https://arxiv.org/abs/1908.05739), we focus on explaining unusual, unexpected, and unlikely situations. UNcommonsense is an English-language corpus consisting of 20k unique contexts paired with explicitly uncommon outcomes. Given these contexts and uncommon outcomes, we crowdsource 41k abductive explanations, which provide a plausible explanation of how an uncommon outcome could have arisen, given an input context.
### Data Fields
- `context` (string): Several sentences describing a context.
- `outcome` (string): An unexpected outcome from the context.
- `human_explanations` (list of strings): A list of human-authored explanations that make the unexpected outcome likely given the context.
- `gpt4_explanations` (list of strings): A list of GPT-4 generated explanations that make the unexpected outcome likely given the context.
- `enhanced_explanations` (list of strings): A list of GPT-4 enhanced human-authored explanations that make the unexpected outcome likely given the context.
- `source` (string): The source of the dataset from which we created the example.
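A short sketch of loading the dataset and reading these fields (repository name and splits per the configuration above):
```python
from datasets import load_dataset

# Load the validation split and inspect one example's fields.
ds = load_dataset("allenai/UNcommonsense", split="validation")
example = ds[0]
print(example["context"])
print(example["outcome"])
for explanation in example["human_explanations"]:
    print("-", explanation)
```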
### Citation Information
Please consider citing [our paper](https://arxiv.org/pdf/2311.08469.pdf) if you find this dataset useful:
```
@article{zhao2023uncommonsense,
title={UNcommonsense Reasoning: Abductive Reasoning about Uncommon Situations},
author={Zhao, Wenting and Chiu, Justin T and Hwang, Jena D and Brahman, Faeze and Hessel, Jack and Choudhury, Sanjiban and Choi, Yejin and Li, Xiang Lorraine and Suhr, Alane},
journal={arXiv preprint arXiv:2311.08469},
year={2023}
}
``` | allenai/UNcommonsense | [
"task_categories:text-generation",
"size_categories:10K<n<100K",
"language:en",
"license:mit",
"abductive reasoning",
"commonsense reasoning",
"uncommonsense",
"arxiv:2311.08469",
"arxiv:1908.05739",
"region:us"
]
| 2023-11-14T22:47:06+00:00 | {"language": ["en"], "license": "mit", "size_categories": ["10K<n<100K"], "task_categories": ["text-generation"], "tags": ["abductive reasoning", "commonsense reasoning", "uncommonsense"], "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "human_explanations", "sequence": "string"}, {"name": "outcome", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "gpt4_explanations", "dtype": "string"}, {"name": "enhanced_explanations", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 16885523, "num_examples": 16099}, {"name": "validation", "num_bytes": 5319894, "num_examples": 2239}], "download_size": 12397712, "dataset_size": 22205417}} | 2024-01-19T22:24:02+00:00 | [
"2311.08469",
"1908.05739"
]
| [
"en"
]
| TAGS
#task_categories-text-generation #size_categories-10K<n<100K #language-English #license-mit #abductive reasoning #commonsense reasoning #uncommonsense #arxiv-2311.08469 #arxiv-1908.05739 #region-us
| # Dataset Card for UNcommonsense
## Dataset Description
- Paper: URL
- Point of Contact: Wenting Zhao
### Dataset Summary
UNcommonsense is an abductive reasoning dataset. Unlike aNLG, we focus on explaining unusual, unexpected, and unlikely situations. UNcommonsense is an English-language corpus consisting of 20k unique contexts paired with explicitly uncommon outcomes. Given these contexts and uncommon outcomes, we crowdsource 41k abductive explanations, which provide a plausible explanation of how an uncommon outcome could have arisen, given an input context.
### Data Fields
- 'context' (string): Several sentences describing a context.
- 'outcome' (string): An unexpected outcome from the context.
- 'human_explanations' (list of strings): A list of human-authored explanations that make the unexpected outcome likely given the context.
- 'gpt4_explanations' (list of strings): A list of GPT-4 generated explanations that make the unexpected outcome likely given the context.
- 'enhanced_explanations' (list of strings): A list of GPT-4 enhanced human-authored explanations that make the unexpected outcome likely given the context.
- 'source' (string): The source of the dataset from which we created the example.
Please consider citing our paper if you find this dataset useful:
| [
"# Dataset Card for UNcommonsense",
"## Dataset Description\n \n- Paper: URL\n- Point of Contact: Wenting Zhao",
"### Dataset Summary\n\nUNcommonsense is an abductive reasoning dataset. Unlike aNLG, we focus on explaining unusual, unexpected, and unlikely situations. UNcommonsense is an English-language corpus consisting of 20k unique contexts paired with explicitly uncommon outcomes. Given these contexts and uncommon outcomes, we crowdsource 41k abductive explanations, which provide a plausible explanation of how an uncommon outcome could have arisen, given an input context.",
"### Data Fields\n\n- 'context' (string): Several sentences describing a context.\n- 'outcome' (string): An unexpected outcome from the context.\n- 'human_explanations' (list of strings): A list of human-authored explanations that make the unexpected outcome likely given the context.\n- 'gpt4_explanations' (list of strings): A list of GPT-4 generated explanations that make the unexpected outcome likely given the context.\n- 'enhanced_explanations' (list of strings): A list of GPT-4 enhanced human-authored explanations that make the unexpected outcome likely given the context.\n- 'source' (string): The source of the dataset from which we created the example.\n \n\n\nPlease consider citing our paper if you find this dataset useful:"
]
| [
"TAGS\n#task_categories-text-generation #size_categories-10K<n<100K #language-English #license-mit #abductive reasoning #commonsense reasoning #uncommonsense #arxiv-2311.08469 #arxiv-1908.05739 #region-us \n",
"# Dataset Card for UNcommonsense",
"## Dataset Description\n \n- Paper: URL\n- Point of Contact: Wenting Zhao",
"### Dataset Summary\n\nUNcommonsense is an abductive reasoning dataset. Unlike aNLG, we focus on explaining unusual, unexpected, and unlikely situations. UNcommonsense is an English-language corpus consisting of 20k unique contexts paired with explicitly uncommon outcomes. Given these contexts and uncommon outcomes, we crowdsource 41k abductive explanations, which provide a plausible explanation of how an uncommon outcome could have arisen, given an input context.",
"### Data Fields\n\n- 'context' (string): Several sentences describing a context.\n- 'outcome' (string): An unexpected outcome from the context.\n- 'human_explanations' (list of strings): A list of human-authored explanations that make the unexpected outcome likely given the context.\n- 'gpt4_explanations' (list of strings): A list of GPT-4 generated explanations that make the unexpected outcome likely given the context.\n- 'enhanced_explanations' (list of strings): A list of GPT-4 enhanced human-authored explanations that make the unexpected outcome likely given the context.\n- 'source' (string): The source of the dataset from which we created the example.\n \n\n\nPlease consider citing our paper if you find this dataset useful:"
]
| [
72,
9,
18,
116,
182
]
| [
"passage: TAGS\n#task_categories-text-generation #size_categories-10K<n<100K #language-English #license-mit #abductive reasoning #commonsense reasoning #uncommonsense #arxiv-2311.08469 #arxiv-1908.05739 #region-us \n# Dataset Card for UNcommonsense## Dataset Description\n \n- Paper: URL\n- Point of Contact: Wenting Zhao### Dataset Summary\n\nUNcommonsense is an abductive reasoning dataset. Unlike aNLG, we focus on explaining unusual, unexpected, and unlikely situations. UNcommonsense is an English-language corpus consisting of 20k unique contexts paired with explicitly uncommon outcomes. Given these contexts and uncommon outcomes, we crowdsource 41k abductive explanations, which provide a plausible explanation of how an uncommon outcome could have arisen, given an input context.### Data Fields\n\n- 'context' (string): Several sentences describing a context.\n- 'outcome' (string): An unexpected outcome from the context.\n- 'human_explanations' (list of strings): A list of human-authored explanations that make the unexpected outcome likely given the context.\n- 'gpt4_explanations' (list of strings): A list of GPT-4 generated explanations that make the unexpected outcome likely given the context.\n- 'enhanced_explanations' (list of strings): A list of GPT-4 enhanced human-authored explanations that make the unexpected outcome likely given the context.\n- 'source' (string): The source of the dataset from which we created the example.\n \n\n\nPlease consider citing our paper if you find this dataset useful:"
]
|
ad76b637dd29dde79c6d5cef947ada290606f288 |
# Dataset Card for Evaluation run of teknium/OpenHermes-2.5-Mistral-7B
## Dataset Description
- **Homepage:**
- **Repository:** https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B
- **Paper:**
- **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard
- **Point of Contact:** [email protected]
### Dataset Summary
Dataset automatically created during the evaluation run of model [teknium/OpenHermes-2.5-Mistral-7B](https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).
The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks.
The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.
An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).
To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset("open-llm-leaderboard/details_teknium__OpenHermes-2.5-Mistral-7B_public",
"harness_winogrande_5",
split="train")
```
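The aggregated metrics can be loaded the same way; a sketch based on the description above, using the "results" configuration and the "train" split that points to the latest run:
```python
from datasets import load_dataset

# The "results" configuration stores the aggregated metrics;
# the "train" split always points to the latest results.
results = load_dataset("open-llm-leaderboard/details_teknium__OpenHermes-2.5-Mistral-7B_public",
                       "results",
                       split="train")
```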
## Latest results
These are the [latest results from run 2023-11-20T08:19:51.425757](https://huggingface.co/datasets/open-llm-leaderboard/details_teknium__OpenHermes-2.5-Mistral-7B_public/blob/main/results_2023-11-20T08-19-51.425757.json) (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval):
```python
{
"all": {
"acc": 0.632448279866198,
"acc_stderr": 0.032234779746923035,
"acc_norm": 0.6400703538735191,
"acc_norm_stderr": 0.03290159609429692,
"mc1": 0.36107711138310894,
"mc1_stderr": 0.016814312844836882,
"mc2": 0.5223798200841158,
"mc2_stderr": 0.015238875045496005,
"em": 0.30683724832214765,
"em_stderr": 0.004722927724288475,
"f1": 0.35794463087248424,
"f1_stderr": 0.004623146117214951
},
"harness|arc:challenge|25": {
"acc": 0.6126279863481229,
"acc_stderr": 0.014235872487909869,
"acc_norm": 0.6493174061433447,
"acc_norm_stderr": 0.013944635930726099
},
"harness|hellaswag|10": {
"acc": 0.6522605058753237,
"acc_stderr": 0.004752794829825045,
"acc_norm": 0.8417645887273452,
"acc_norm_stderr": 0.0036421571661623443
},
"harness|hendrycksTest-abstract_algebra|5": {
"acc": 0.33,
"acc_stderr": 0.04725815626252606,
"acc_norm": 0.33,
"acc_norm_stderr": 0.04725815626252606
},
"harness|hendrycksTest-anatomy|5": {
"acc": 0.6,
"acc_stderr": 0.04232073695151589,
"acc_norm": 0.6,
"acc_norm_stderr": 0.04232073695151589
},
"harness|hendrycksTest-astronomy|5": {
"acc": 0.6973684210526315,
"acc_stderr": 0.037385206761196686,
"acc_norm": 0.6973684210526315,
"acc_norm_stderr": 0.037385206761196686
},
"harness|hendrycksTest-business_ethics|5": {
"acc": 0.57,
"acc_stderr": 0.049756985195624284,
"acc_norm": 0.57,
"acc_norm_stderr": 0.049756985195624284
},
"harness|hendrycksTest-clinical_knowledge|5": {
"acc": 0.6792452830188679,
"acc_stderr": 0.028727502957880267,
"acc_norm": 0.6792452830188679,
"acc_norm_stderr": 0.028727502957880267
},
"harness|hendrycksTest-college_biology|5": {
"acc": 0.75,
"acc_stderr": 0.03621034121889507,
"acc_norm": 0.75,
"acc_norm_stderr": 0.03621034121889507
},
"harness|hendrycksTest-college_chemistry|5": {
"acc": 0.45,
"acc_stderr": 0.05,
"acc_norm": 0.45,
"acc_norm_stderr": 0.05
},
"harness|hendrycksTest-college_computer_science|5": {
"acc": 0.46,
"acc_stderr": 0.05009082659620333,
"acc_norm": 0.46,
"acc_norm_stderr": 0.05009082659620333
},
"harness|hendrycksTest-college_mathematics|5": {
"acc": 0.29,
"acc_stderr": 0.045604802157206845,
"acc_norm": 0.29,
"acc_norm_stderr": 0.045604802157206845
},
"harness|hendrycksTest-college_medicine|5": {
"acc": 0.6127167630057804,
"acc_stderr": 0.03714325906302065,
"acc_norm": 0.6127167630057804,
"acc_norm_stderr": 0.03714325906302065
},
"harness|hendrycksTest-college_physics|5": {
"acc": 0.38235294117647056,
"acc_stderr": 0.04835503696107223,
"acc_norm": 0.38235294117647056,
"acc_norm_stderr": 0.04835503696107223
},
"harness|hendrycksTest-computer_security|5": {
"acc": 0.76,
"acc_stderr": 0.042923469599092816,
"acc_norm": 0.76,
"acc_norm_stderr": 0.042923469599092816
},
"harness|hendrycksTest-conceptual_physics|5": {
"acc": 0.5574468085106383,
"acc_stderr": 0.032469569197899575,
"acc_norm": 0.5574468085106383,
"acc_norm_stderr": 0.032469569197899575
},
"harness|hendrycksTest-econometrics|5": {
"acc": 0.5,
"acc_stderr": 0.047036043419179864,
"acc_norm": 0.5,
"acc_norm_stderr": 0.047036043419179864
},
"harness|hendrycksTest-electrical_engineering|5": {
"acc": 0.5310344827586206,
"acc_stderr": 0.04158632762097828,
"acc_norm": 0.5310344827586206,
"acc_norm_stderr": 0.04158632762097828
},
"harness|hendrycksTest-elementary_mathematics|5": {
"acc": 0.4365079365079365,
"acc_stderr": 0.0255428468174005,
"acc_norm": 0.4365079365079365,
"acc_norm_stderr": 0.0255428468174005
},
"harness|hendrycksTest-formal_logic|5": {
"acc": 0.4603174603174603,
"acc_stderr": 0.04458029125470973,
"acc_norm": 0.4603174603174603,
"acc_norm_stderr": 0.04458029125470973
},
"harness|hendrycksTest-global_facts|5": {
"acc": 0.36,
"acc_stderr": 0.04824181513244218,
"acc_norm": 0.36,
"acc_norm_stderr": 0.04824181513244218
},
"harness|hendrycksTest-high_school_biology|5": {
"acc": 0.7935483870967742,
"acc_stderr": 0.02302589961718871,
"acc_norm": 0.7935483870967742,
"acc_norm_stderr": 0.02302589961718871
},
"harness|hendrycksTest-high_school_chemistry|5": {
"acc": 0.5073891625615764,
"acc_stderr": 0.035176035403610105,
"acc_norm": 0.5073891625615764,
"acc_norm_stderr": 0.035176035403610105
},
"harness|hendrycksTest-high_school_computer_science|5": {
"acc": 0.68,
"acc_stderr": 0.04688261722621505,
"acc_norm": 0.68,
"acc_norm_stderr": 0.04688261722621505
},
"harness|hendrycksTest-high_school_european_history|5": {
"acc": 0.7878787878787878,
"acc_stderr": 0.031922715695483016,
"acc_norm": 0.7878787878787878,
"acc_norm_stderr": 0.031922715695483016
},
"harness|hendrycksTest-high_school_geography|5": {
"acc": 0.803030303030303,
"acc_stderr": 0.02833560973246336,
"acc_norm": 0.803030303030303,
"acc_norm_stderr": 0.02833560973246336
},
"harness|hendrycksTest-high_school_government_and_politics|5": {
"acc": 0.8911917098445595,
"acc_stderr": 0.022473253332768776,
"acc_norm": 0.8911917098445595,
"acc_norm_stderr": 0.022473253332768776
},
"harness|hendrycksTest-high_school_macroeconomics|5": {
"acc": 0.6076923076923076,
"acc_stderr": 0.024756000382130956,
"acc_norm": 0.6076923076923076,
"acc_norm_stderr": 0.024756000382130956
},
"harness|hendrycksTest-high_school_mathematics|5": {
"acc": 0.2962962962962963,
"acc_stderr": 0.02784081149587193,
"acc_norm": 0.2962962962962963,
"acc_norm_stderr": 0.02784081149587193
},
"harness|hendrycksTest-high_school_microeconomics|5": {
"acc": 0.6764705882352942,
"acc_stderr": 0.030388353551886797,
"acc_norm": 0.6764705882352942,
"acc_norm_stderr": 0.030388353551886797
},
"harness|hendrycksTest-high_school_physics|5": {
"acc": 0.33774834437086093,
"acc_stderr": 0.03861557546255169,
"acc_norm": 0.33774834437086093,
"acc_norm_stderr": 0.03861557546255169
},
"harness|hendrycksTest-high_school_psychology|5": {
"acc": 0.8311926605504587,
"acc_stderr": 0.016060056268530343,
"acc_norm": 0.8311926605504587,
"acc_norm_stderr": 0.016060056268530343
},
"harness|hendrycksTest-high_school_statistics|5": {
"acc": 0.5046296296296297,
"acc_stderr": 0.03409825519163572,
"acc_norm": 0.5046296296296297,
"acc_norm_stderr": 0.03409825519163572
},
"harness|hendrycksTest-high_school_us_history|5": {
"acc": 0.7990196078431373,
"acc_stderr": 0.02812597226565437,
"acc_norm": 0.7990196078431373,
"acc_norm_stderr": 0.02812597226565437
},
"harness|hendrycksTest-high_school_world_history|5": {
"acc": 0.8143459915611815,
"acc_stderr": 0.025310495376944856,
"acc_norm": 0.8143459915611815,
"acc_norm_stderr": 0.025310495376944856
},
"harness|hendrycksTest-human_aging|5": {
"acc": 0.7040358744394619,
"acc_stderr": 0.030636591348699803,
"acc_norm": 0.7040358744394619,
"acc_norm_stderr": 0.030636591348699803
},
"harness|hendrycksTest-human_sexuality|5": {
"acc": 0.7786259541984732,
"acc_stderr": 0.03641297081313728,
"acc_norm": 0.7786259541984732,
"acc_norm_stderr": 0.03641297081313728
},
"harness|hendrycksTest-international_law|5": {
"acc": 0.7603305785123967,
"acc_stderr": 0.03896878985070416,
"acc_norm": 0.7603305785123967,
"acc_norm_stderr": 0.03896878985070416
},
"harness|hendrycksTest-jurisprudence|5": {
"acc": 0.7870370370370371,
"acc_stderr": 0.039578354719809805,
"acc_norm": 0.7870370370370371,
"acc_norm_stderr": 0.039578354719809805
},
"harness|hendrycksTest-logical_fallacies|5": {
"acc": 0.7730061349693251,
"acc_stderr": 0.03291099578615769,
"acc_norm": 0.7730061349693251,
"acc_norm_stderr": 0.03291099578615769
},
"harness|hendrycksTest-machine_learning|5": {
"acc": 0.5178571428571429,
"acc_stderr": 0.04742762361243011,
"acc_norm": 0.5178571428571429,
"acc_norm_stderr": 0.04742762361243011
},
"harness|hendrycksTest-management|5": {
"acc": 0.7766990291262136,
"acc_stderr": 0.04123553189891431,
"acc_norm": 0.7766990291262136,
"acc_norm_stderr": 0.04123553189891431
},
"harness|hendrycksTest-marketing|5": {
"acc": 0.8717948717948718,
"acc_stderr": 0.021901905115073325,
"acc_norm": 0.8717948717948718,
"acc_norm_stderr": 0.021901905115073325
},
"harness|hendrycksTest-medical_genetics|5": {
"acc": 0.7,
"acc_stderr": 0.046056618647183814,
"acc_norm": 0.7,
"acc_norm_stderr": 0.046056618647183814
},
"harness|hendrycksTest-miscellaneous|5": {
"acc": 0.822477650063857,
"acc_stderr": 0.013664230995834832,
"acc_norm": 0.822477650063857,
"acc_norm_stderr": 0.013664230995834832
},
"harness|hendrycksTest-moral_disputes|5": {
"acc": 0.7283236994219653,
"acc_stderr": 0.023948512905468355,
"acc_norm": 0.7283236994219653,
"acc_norm_stderr": 0.023948512905468355
},
"harness|hendrycksTest-moral_scenarios|5": {
"acc": 0.3106145251396648,
"acc_stderr": 0.015476515438005566,
"acc_norm": 0.3106145251396648,
"acc_norm_stderr": 0.015476515438005566
},
"harness|hendrycksTest-nutrition|5": {
"acc": 0.7581699346405228,
"acc_stderr": 0.024518195641879334,
"acc_norm": 0.7581699346405228,
"acc_norm_stderr": 0.024518195641879334
},
"harness|hendrycksTest-philosophy|5": {
"acc": 0.6945337620578779,
"acc_stderr": 0.026160584450140446,
"acc_norm": 0.6945337620578779,
"acc_norm_stderr": 0.026160584450140446
},
"harness|hendrycksTest-prehistory|5": {
"acc": 0.75,
"acc_stderr": 0.02409347123262133,
"acc_norm": 0.75,
"acc_norm_stderr": 0.02409347123262133
},
"harness|hendrycksTest-professional_accounting|5": {
"acc": 0.49645390070921985,
"acc_stderr": 0.02982674915328092,
"acc_norm": 0.49645390070921985,
"acc_norm_stderr": 0.02982674915328092
},
"harness|hendrycksTest-professional_law|5": {
"acc": 0.46936114732724904,
"acc_stderr": 0.012746237711716634,
"acc_norm": 0.46936114732724904,
"acc_norm_stderr": 0.012746237711716634
},
"harness|hendrycksTest-professional_medicine|5": {
"acc": 0.6801470588235294,
"acc_stderr": 0.028332959514031208,
"acc_norm": 0.6801470588235294,
"acc_norm_stderr": 0.028332959514031208
},
"harness|hendrycksTest-professional_psychology|5": {
"acc": 0.673202614379085,
"acc_stderr": 0.018975427920507215,
"acc_norm": 0.673202614379085,
"acc_norm_stderr": 0.018975427920507215
},
"harness|hendrycksTest-public_relations|5": {
"acc": 0.6545454545454545,
"acc_stderr": 0.04554619617541054,
"acc_norm": 0.6545454545454545,
"acc_norm_stderr": 0.04554619617541054
},
"harness|hendrycksTest-security_studies|5": {
"acc": 0.7428571428571429,
"acc_stderr": 0.027979823538744543,
"acc_norm": 0.7428571428571429,
"acc_norm_stderr": 0.027979823538744543
},
"harness|hendrycksTest-sociology|5": {
"acc": 0.8109452736318408,
"acc_stderr": 0.027686913588013024,
"acc_norm": 0.8109452736318408,
"acc_norm_stderr": 0.027686913588013024
},
"harness|hendrycksTest-us_foreign_policy|5": {
"acc": 0.87,
"acc_stderr": 0.033799766898963086,
"acc_norm": 0.87,
"acc_norm_stderr": 0.033799766898963086
},
"harness|hendrycksTest-virology|5": {
"acc": 0.5542168674698795,
"acc_stderr": 0.03869543323472101,
"acc_norm": 0.5542168674698795,
"acc_norm_stderr": 0.03869543323472101
},
"harness|hendrycksTest-world_religions|5": {
"acc": 0.8304093567251462,
"acc_stderr": 0.02878210810540171,
"acc_norm": 0.8304093567251462,
"acc_norm_stderr": 0.02878210810540171
},
"harness|truthfulqa:mc|0": {
"mc1": 0.36107711138310894,
"mc1_stderr": 0.016814312844836882,
"mc2": 0.5223798200841158,
"mc2_stderr": 0.015238875045496005
},
"harness|winogrande|5": {
"acc": 0.7805840568271507,
"acc_stderr": 0.01163126836060778
},
"harness|drop|3": {
"em": 0.30683724832214765,
"em_stderr": 0.004722927724288475,
"f1": 0.35794463087248424,
"f1_stderr": 0.004623146117214951
},
"harness|gsm8k|5": {
"acc": 0.2608036391205459,
"acc_stderr": 0.012094252417332745
}
}
```
### Supported Tasks and Leaderboards
[More Information Needed]
### Languages
[More Information Needed]
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
[More Information Needed]
### Contributions
[More Information Needed] | open-llm-leaderboard/details_teknium__OpenHermes-2.5-Mistral-7B | [
"region:us"
]
| 2023-11-14T22:47:46+00:00 | {"pretty_name": "Evaluation run of teknium/OpenHermes-2.5-Mistral-7B", "dataset_summary": "Dataset automatically created during the evaluation run of model [teknium/OpenHermes-2.5-Mistral-7B](https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_teknium__OpenHermes-2.5-Mistral-7B_public\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-11-20T08:19:51.425757](https://huggingface.co/datasets/open-llm-leaderboard/details_teknium__OpenHermes-2.5-Mistral-7B_public/blob/main/results_2023-11-20T08-19-51.425757.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.632448279866198,\n \"acc_stderr\": 0.032234779746923035,\n \"acc_norm\": 0.6400703538735191,\n \"acc_norm_stderr\": 0.03290159609429692,\n \"mc1\": 0.36107711138310894,\n \"mc1_stderr\": 0.016814312844836882,\n \"mc2\": 0.5223798200841158,\n \"mc2_stderr\": 0.015238875045496005,\n \"em\": 0.30683724832214765,\n \"em_stderr\": 0.004722927724288475,\n \"f1\": 0.35794463087248424,\n \"f1_stderr\": 0.004623146117214951\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.6126279863481229,\n \"acc_stderr\": 0.014235872487909869,\n \"acc_norm\": 0.6493174061433447,\n \"acc_norm_stderr\": 0.013944635930726099\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6522605058753237,\n \"acc_stderr\": 0.004752794829825045,\n \"acc_norm\": 0.8417645887273452,\n \"acc_norm_stderr\": 0.0036421571661623443\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.33,\n \"acc_stderr\": 0.04725815626252606,\n \"acc_norm\": 0.33,\n \"acc_norm_stderr\": 0.04725815626252606\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.6,\n \"acc_stderr\": 0.04232073695151589,\n \"acc_norm\": 0.6,\n \"acc_norm_stderr\": 0.04232073695151589\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.6973684210526315,\n \"acc_stderr\": 0.037385206761196686,\n \"acc_norm\": 0.6973684210526315,\n \"acc_norm_stderr\": 0.037385206761196686\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.57,\n \"acc_stderr\": 0.049756985195624284,\n \"acc_norm\": 0.57,\n \"acc_norm_stderr\": 0.049756985195624284\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.6792452830188679,\n \"acc_stderr\": 0.028727502957880267,\n \"acc_norm\": 0.6792452830188679,\n \"acc_norm_stderr\": 0.028727502957880267\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.75,\n \"acc_stderr\": 0.03621034121889507,\n \"acc_norm\": 0.75,\n 
\"acc_norm_stderr\": 0.03621034121889507\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.45,\n \"acc_stderr\": 0.05,\n \"acc_norm\": 0.45,\n \"acc_norm_stderr\": 0.05\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.46,\n \"acc_stderr\": 0.05009082659620333,\n \"acc_norm\": 0.46,\n \"acc_norm_stderr\": 0.05009082659620333\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.29,\n \"acc_stderr\": 0.045604802157206845,\n \"acc_norm\": 0.29,\n \"acc_norm_stderr\": 0.045604802157206845\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.6127167630057804,\n \"acc_stderr\": 0.03714325906302065,\n \"acc_norm\": 0.6127167630057804,\n \"acc_norm_stderr\": 0.03714325906302065\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.38235294117647056,\n \"acc_stderr\": 0.04835503696107223,\n \"acc_norm\": 0.38235294117647056,\n \"acc_norm_stderr\": 0.04835503696107223\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.76,\n \"acc_stderr\": 0.042923469599092816,\n \"acc_norm\": 0.76,\n \"acc_norm_stderr\": 0.042923469599092816\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.5574468085106383,\n \"acc_stderr\": 0.032469569197899575,\n \"acc_norm\": 0.5574468085106383,\n \"acc_norm_stderr\": 0.032469569197899575\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.5,\n \"acc_stderr\": 0.047036043419179864,\n \"acc_norm\": 0.5,\n \"acc_norm_stderr\": 0.047036043419179864\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.5310344827586206,\n \"acc_stderr\": 0.04158632762097828,\n \"acc_norm\": 0.5310344827586206,\n \"acc_norm_stderr\": 0.04158632762097828\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.4365079365079365,\n \"acc_stderr\": 0.0255428468174005,\n \"acc_norm\": 0.4365079365079365,\n \"acc_norm_stderr\": 0.0255428468174005\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.4603174603174603,\n \"acc_stderr\": 0.04458029125470973,\n \"acc_norm\": 0.4603174603174603,\n \"acc_norm_stderr\": 0.04458029125470973\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.36,\n \"acc_stderr\": 0.04824181513244218,\n \"acc_norm\": 0.36,\n \"acc_norm_stderr\": 0.04824181513244218\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.7935483870967742,\n \"acc_stderr\": 0.02302589961718871,\n \"acc_norm\": 0.7935483870967742,\n \"acc_norm_stderr\": 0.02302589961718871\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.5073891625615764,\n \"acc_stderr\": 0.035176035403610105,\n \"acc_norm\": 0.5073891625615764,\n \"acc_norm_stderr\": 0.035176035403610105\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.68,\n \"acc_stderr\": 0.04688261722621505,\n \"acc_norm\": 0.68,\n \"acc_norm_stderr\": 0.04688261722621505\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.7878787878787878,\n \"acc_stderr\": 0.031922715695483016,\n \"acc_norm\": 0.7878787878787878,\n \"acc_norm_stderr\": 0.031922715695483016\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.803030303030303,\n \"acc_stderr\": 0.02833560973246336,\n \"acc_norm\": 0.803030303030303,\n \"acc_norm_stderr\": 0.02833560973246336\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.8911917098445595,\n \"acc_stderr\": 0.022473253332768776,\n \"acc_norm\": 0.8911917098445595,\n \"acc_norm_stderr\": 
0.022473253332768776\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.6076923076923076,\n \"acc_stderr\": 0.024756000382130956,\n \"acc_norm\": 0.6076923076923076,\n \"acc_norm_stderr\": 0.024756000382130956\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.2962962962962963,\n \"acc_stderr\": 0.02784081149587193,\n \"acc_norm\": 0.2962962962962963,\n \"acc_norm_stderr\": 0.02784081149587193\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.6764705882352942,\n \"acc_stderr\": 0.030388353551886797,\n \"acc_norm\": 0.6764705882352942,\n \"acc_norm_stderr\": 0.030388353551886797\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.33774834437086093,\n \"acc_stderr\": 0.03861557546255169,\n \"acc_norm\": 0.33774834437086093,\n \"acc_norm_stderr\": 0.03861557546255169\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.8311926605504587,\n \"acc_stderr\": 0.016060056268530343,\n \"acc_norm\": 0.8311926605504587,\n \"acc_norm_stderr\": 0.016060056268530343\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.5046296296296297,\n \"acc_stderr\": 0.03409825519163572,\n \"acc_norm\": 0.5046296296296297,\n \"acc_norm_stderr\": 0.03409825519163572\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.7990196078431373,\n \"acc_stderr\": 0.02812597226565437,\n \"acc_norm\": 0.7990196078431373,\n \"acc_norm_stderr\": 0.02812597226565437\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.8143459915611815,\n \"acc_stderr\": 0.025310495376944856,\n \"acc_norm\": 0.8143459915611815,\n \"acc_norm_stderr\": 0.025310495376944856\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.7040358744394619,\n \"acc_stderr\": 0.030636591348699803,\n \"acc_norm\": 0.7040358744394619,\n \"acc_norm_stderr\": 0.030636591348699803\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.7786259541984732,\n \"acc_stderr\": 0.03641297081313728,\n \"acc_norm\": 0.7786259541984732,\n \"acc_norm_stderr\": 0.03641297081313728\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.7603305785123967,\n \"acc_stderr\": 0.03896878985070416,\n \"acc_norm\": 0.7603305785123967,\n \"acc_norm_stderr\": 0.03896878985070416\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.7870370370370371,\n \"acc_stderr\": 0.039578354719809805,\n \"acc_norm\": 0.7870370370370371,\n \"acc_norm_stderr\": 0.039578354719809805\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.7730061349693251,\n \"acc_stderr\": 0.03291099578615769,\n \"acc_norm\": 0.7730061349693251,\n \"acc_norm_stderr\": 0.03291099578615769\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.5178571428571429,\n \"acc_stderr\": 0.04742762361243011,\n \"acc_norm\": 0.5178571428571429,\n \"acc_norm_stderr\": 0.04742762361243011\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.7766990291262136,\n \"acc_stderr\": 0.04123553189891431,\n \"acc_norm\": 0.7766990291262136,\n \"acc_norm_stderr\": 0.04123553189891431\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.8717948717948718,\n \"acc_stderr\": 0.021901905115073325,\n \"acc_norm\": 0.8717948717948718,\n \"acc_norm_stderr\": 0.021901905115073325\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.7,\n \"acc_stderr\": 0.046056618647183814,\n \"acc_norm\": 0.7,\n \"acc_norm_stderr\": 0.046056618647183814\n },\n 
\"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.822477650063857,\n \"acc_stderr\": 0.013664230995834832,\n \"acc_norm\": 0.822477650063857,\n \"acc_norm_stderr\": 0.013664230995834832\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.7283236994219653,\n \"acc_stderr\": 0.023948512905468355,\n \"acc_norm\": 0.7283236994219653,\n \"acc_norm_stderr\": 0.023948512905468355\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.3106145251396648,\n \"acc_stderr\": 0.015476515438005566,\n \"acc_norm\": 0.3106145251396648,\n \"acc_norm_stderr\": 0.015476515438005566\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.7581699346405228,\n \"acc_stderr\": 0.024518195641879334,\n \"acc_norm\": 0.7581699346405228,\n \"acc_norm_stderr\": 0.024518195641879334\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.6945337620578779,\n \"acc_stderr\": 0.026160584450140446,\n \"acc_norm\": 0.6945337620578779,\n \"acc_norm_stderr\": 0.026160584450140446\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.75,\n \"acc_stderr\": 0.02409347123262133,\n \"acc_norm\": 0.75,\n \"acc_norm_stderr\": 0.02409347123262133\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.49645390070921985,\n \"acc_stderr\": 0.02982674915328092,\n \"acc_norm\": 0.49645390070921985,\n \"acc_norm_stderr\": 0.02982674915328092\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.46936114732724904,\n \"acc_stderr\": 0.012746237711716634,\n \"acc_norm\": 0.46936114732724904,\n \"acc_norm_stderr\": 0.012746237711716634\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.6801470588235294,\n \"acc_stderr\": 0.028332959514031208,\n \"acc_norm\": 0.6801470588235294,\n \"acc_norm_stderr\": 0.028332959514031208\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.673202614379085,\n \"acc_stderr\": 0.018975427920507215,\n \"acc_norm\": 0.673202614379085,\n \"acc_norm_stderr\": 0.018975427920507215\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.6545454545454545,\n \"acc_stderr\": 0.04554619617541054,\n \"acc_norm\": 0.6545454545454545,\n \"acc_norm_stderr\": 0.04554619617541054\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.7428571428571429,\n \"acc_stderr\": 0.027979823538744543,\n \"acc_norm\": 0.7428571428571429,\n \"acc_norm_stderr\": 0.027979823538744543\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.8109452736318408,\n \"acc_stderr\": 0.027686913588013024,\n \"acc_norm\": 0.8109452736318408,\n \"acc_norm_stderr\": 0.027686913588013024\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.87,\n \"acc_stderr\": 0.033799766898963086,\n \"acc_norm\": 0.87,\n \"acc_norm_stderr\": 0.033799766898963086\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.5542168674698795,\n \"acc_stderr\": 0.03869543323472101,\n \"acc_norm\": 0.5542168674698795,\n \"acc_norm_stderr\": 0.03869543323472101\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.8304093567251462,\n \"acc_stderr\": 0.02878210810540171,\n \"acc_norm\": 0.8304093567251462,\n \"acc_norm_stderr\": 0.02878210810540171\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.36107711138310894,\n \"mc1_stderr\": 0.016814312844836882,\n \"mc2\": 0.5223798200841158,\n \"mc2_stderr\": 0.015238875045496005\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7805840568271507,\n \"acc_stderr\": 0.01163126836060778\n },\n \"harness|drop|3\": {\n \"em\": 0.30683724832214765,\n 
\"em_stderr\": 0.004722927724288475,\n \"f1\": 0.35794463087248424,\n \"f1_stderr\": 0.004623146117214951\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.2608036391205459,\n \"acc_stderr\": 0.012094252417332745\n }\n}\n```", "repo_url": "https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|arc:challenge|25_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|arc:challenge|25_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_drop_3", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|drop|3_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|drop|3_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|gsm8k|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|gsm8k|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hellaswag|10_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hellaswag|10_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-14T22-44-46.514057.parquet", 
"**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-management|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-11-14T22-44-46.514057.parquet", 
"**/details_harness|hendrycksTest-sociology|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-virology|5_2023-11-14T22-44-46.514057.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-11-20T08-19-51.425757.parquet", 
"**/details_harness|hendrycksTest-human_sexuality|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-management|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-virology|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-20T08-19-51.425757.parquet", 
"**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-management|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-11-20T08-19-51.425757.parquet", 
"**/details_harness|hendrycksTest-sociology|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-virology|5_2023-11-20T08-19-51.425757.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-college_chemistry|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": 
"harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": 
"2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": 
"harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": 
["**/details_harness|hendrycksTest-jurisprudence|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-management|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-management|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-moral_disputes|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": 
["**/details_harness|hendrycksTest-professional_psychology|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-virology|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-virology|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|truthfulqa:mc|0_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|truthfulqa:mc|0_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": 
["**/details_harness|truthfulqa:mc|0_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["**/details_harness|winogrande|5_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["**/details_harness|winogrande|5_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-11-20T08-19-51.425757.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_11_14T22_44_46.514057", "path": ["results_2023-11-14T22-44-46.514057.parquet"]}, {"split": "2023_11_20T08_19_51.425757", "path": ["results_2023-11-20T08-19-51.425757.parquet"]}, {"split": "latest", "path": ["results_2023-11-20T08-19-51.425757.parquet"]}]}]} | 2023-11-20T08:23:35+00:00 | []
| []
| TAGS
#region-us
|
# Dataset Card for Evaluation run of teknium/OpenHermes-2.5-Mistral-7B
## Dataset Description
- Homepage:
- Repository: URL
- Paper:
- Leaderboard: URL
- Point of Contact: clementine@URL
### Dataset Summary
Dataset automatically created during the evaluation run of model teknium/OpenHermes-2.5-Mistral-7B on the Open LLM Leaderboard.
The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks.
The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.
An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).
To load the details from a run, you can for instance do the following:
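A minimal sketch of loading one per-task detail split with the 🤗 `datasets` library. The repository id below follows the `open-llm-leaderboard/details_<org>__<model>` naming convention used for these evaluation datasets and is an assumption, as is the choice of the `harness_winogrande_5` config (any config name from this card works the same way):

```python
from datasets import load_dataset

# Assumed repo id: details datasets for the Open LLM Leaderboard are usually
# published as open-llm-leaderboard/details_<org>__<model>.
data = load_dataset(
    "open-llm-leaderboard/details_teknium__OpenHermes-2.5-Mistral-7B",
    "harness_winogrande_5",   # one of the 64 per-task configurations
    split="latest",           # the split that always tracks the newest run
)
print(data[0])  # one evaluated example with the model's predictions
```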
## Latest results
These are the latest results from run 2023-11-20T08:19:51.425757 (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks; you can find each one in the "results" configuration and in the "latest" split for each eval):
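The aggregated numbers themselves live in the "results" configuration. A hedged sketch for pulling them out, assuming the same repository id as above; the exact column layout of the results parquet is not shown in this card, so the snippet inspects the columns rather than hard-coding field names:

```python
from datasets import load_dataset

# "results" aggregates every task of a run; "latest" is the newest timestamp.
results = load_dataset(
    "open-llm-leaderboard/details_teknium__OpenHermes-2.5-Mistral-7B",
    "results",
    split="latest",
)
print(results.column_names)  # discover the aggregated metric columns first
```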
### Supported Tasks and Leaderboards
### Languages
## Dataset Structure
### Data Instances
### Data Fields
### Data Splits
## Dataset Creation
### Curation Rationale
### Source Data
#### Initial Data Collection and Normalization
#### Who are the source language producers?
### Annotations
#### Annotation process
#### Who are the annotators?
### Personal and Sensitive Information
## Considerations for Using the Data
### Social Impact of Dataset
### Discussion of Biases
### Other Known Limitations
## Additional Information
### Dataset Curators
### Licensing Information
### Contributions
| [
"# Dataset Card for Evaluation run of teknium/OpenHermes-2.5-Mistral-7B",
"## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL",
"### Dataset Summary\n\nDataset automatically created during the evaluation run of model teknium/OpenHermes-2.5-Mistral-7B on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:",
"## Latest results\n\nThese are the latest results from run 2023-11-20T08:19:51.425757(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):",
"### Supported Tasks and Leaderboards",
"### Languages",
"## Dataset Structure",
"### Data Instances",
"### Data Fields",
"### Data Splits",
"## Dataset Creation",
"### Curation Rationale",
"### Source Data",
"#### Initial Data Collection and Normalization",
"#### Who are the source language producers?",
"### Annotations",
"#### Annotation process",
"#### Who are the annotators?",
"### Personal and Sensitive Information",
"## Considerations for Using the Data",
"### Social Impact of Dataset",
"### Discussion of Biases",
"### Other Known Limitations",
"## Additional Information",
"### Dataset Curators",
"### Licensing Information",
"### Contributions"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for Evaluation run of teknium/OpenHermes-2.5-Mistral-7B",
"## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL",
"### Dataset Summary\n\nDataset automatically created during the evaluation run of model teknium/OpenHermes-2.5-Mistral-7B on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:",
"## Latest results\n\nThese are the latest results from run 2023-11-20T08:19:51.425757(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):",
"### Supported Tasks and Leaderboards",
"### Languages",
"## Dataset Structure",
"### Data Instances",
"### Data Fields",
"### Data Splits",
"## Dataset Creation",
"### Curation Rationale",
"### Source Data",
"#### Initial Data Collection and Normalization",
"#### Who are the source language producers?",
"### Annotations",
"#### Annotation process",
"#### Who are the annotators?",
"### Personal and Sensitive Information",
"## Considerations for Using the Data",
"### Social Impact of Dataset",
"### Discussion of Biases",
"### Other Known Limitations",
"## Additional Information",
"### Dataset Curators",
"### Licensing Information",
"### Contributions"
]
| [
6,
22,
31,
171,
67,
10,
4,
6,
6,
5,
5,
5,
7,
4,
10,
10,
5,
5,
9,
8,
8,
7,
8,
7,
5,
6,
6,
5
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of teknium/OpenHermes-2.5-Mistral-7B## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model teknium/OpenHermes-2.5-Mistral-7B on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-11-20T08:19:51.425757(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions"
]
|
7fb18fa54975ac25ba399d435fd08695022e0165 | # Dataset Card for "github-issues"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | banw/github-issues | [
"region:us"
]
| 2023-11-14T22:50:37+00:00 | {"dataset_info": {"features": [{"name": "url", "dtype": "string"}, {"name": "repository_url", "dtype": "string"}, {"name": "labels_url", "dtype": "string"}, {"name": "comments_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "number", "dtype": "int64"}, {"name": "title", "dtype": "string"}, {"name": "user", "struct": [{"name": "login", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "avatar_url", "dtype": "string"}, {"name": "gravatar_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "followers_url", "dtype": "string"}, {"name": "following_url", "dtype": "string"}, {"name": "gists_url", "dtype": "string"}, {"name": "starred_url", "dtype": "string"}, {"name": "subscriptions_url", "dtype": "string"}, {"name": "organizations_url", "dtype": "string"}, {"name": "repos_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "received_events_url", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "site_admin", "dtype": "bool"}]}, {"name": "labels", "list": [{"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "color", "dtype": "string"}, {"name": "default", "dtype": "bool"}, {"name": "description", "dtype": "string"}]}, {"name": "state", "dtype": "string"}, {"name": "locked", "dtype": "bool"}, {"name": "assignee", "struct": [{"name": "login", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "avatar_url", "dtype": "string"}, {"name": "gravatar_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "followers_url", "dtype": "string"}, {"name": "following_url", "dtype": "string"}, {"name": "gists_url", "dtype": "string"}, {"name": "starred_url", "dtype": "string"}, {"name": "subscriptions_url", "dtype": "string"}, {"name": "organizations_url", "dtype": "string"}, {"name": "repos_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "received_events_url", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "site_admin", "dtype": "bool"}]}, {"name": "assignees", "list": [{"name": "login", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "avatar_url", "dtype": "string"}, {"name": "gravatar_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "followers_url", "dtype": "string"}, {"name": "following_url", "dtype": "string"}, {"name": "gists_url", "dtype": "string"}, {"name": "starred_url", "dtype": "string"}, {"name": "subscriptions_url", "dtype": "string"}, {"name": "organizations_url", "dtype": "string"}, {"name": "repos_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "received_events_url", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "site_admin", "dtype": "bool"}]}, {"name": "comments", "dtype": "int64"}, {"name": "author_association", "dtype": "string"}, {"name": "active_lock_reason", "dtype": "null"}, {"name": "draft", "dtype": "bool"}, {"name": "pull_request", "struct": [{"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "diff_url", 
"dtype": "string"}, {"name": "patch_url", "dtype": "string"}, {"name": "merged_at", "dtype": "timestamp[s]"}]}, {"name": "body", "dtype": "string"}, {"name": "reactions", "struct": [{"name": "url", "dtype": "string"}, {"name": "total_count", "dtype": "int64"}, {"name": "+1", "dtype": "int64"}, {"name": "-1", "dtype": "int64"}, {"name": "laugh", "dtype": "int64"}, {"name": "hooray", "dtype": "int64"}, {"name": "confused", "dtype": "int64"}, {"name": "heart", "dtype": "int64"}, {"name": "rocket", "dtype": "int64"}, {"name": "eyes", "dtype": "int64"}]}, {"name": "timeline_url", "dtype": "string"}, {"name": "performed_via_github_app", "dtype": "null"}, {"name": "state_reason", "dtype": "string"}, {"name": "is_pull_request", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 14823574, "num_examples": 5000}], "download_size": 3663405, "dataset_size": 14823574}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-14T23:07:33+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "github-issues"
More Information needed | [
"# Dataset Card for \"github-issues\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"github-issues\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"github-issues\"\n\nMore Information needed"
]
|
8e4fc35c052a8ac4f9eda17ccb08c61893880c72 | # Dataset Card for "multiplication_whole"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | jlbaker361/multiplication_whole | [
"region:us"
]
| 2023-11-14T23:35:29+00:00 | {"dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "float64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1255311.0, "num_examples": 29376}, {"name": "test", "num_bytes": 139479.0, "num_examples": 3264}], "download_size": 896516, "dataset_size": 1394790.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]} | 2023-11-15T12:59:54+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "multiplication_whole"
More Information needed | [
"# Dataset Card for \"multiplication_whole\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"multiplication_whole\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"multiplication_whole\"\n\nMore Information needed"
]
|
ac68b75a736618426bc6eabedcc8cbbf76cf6f5f | # Dataset Card for "wikipedia_multi_choices"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | vlsp-2023-vllm/wikipediaqa_vi | [
"region:us"
]
| 2023-11-14T23:36:32+00:00 | {"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "choices", "struct": [{"name": "labels", "sequence": "string"}, {"name": "text", "sequence": "string"}]}, {"name": "answerKey", "dtype": "string"}, {"name": "metadata", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 389855.8635394456, "num_examples": 2000}], "download_size": 199645, "dataset_size": 389855.8635394456}, "configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}]} | 2023-11-19T02:42:46+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "wikipedia_multi_choices"
More Information needed | [
"# Dataset Card for \"wikipedia_multi_choices\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"wikipedia_multi_choices\"\n\nMore Information needed"
]
| [
6,
17
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"wikipedia_multi_choices\"\n\nMore Information needed"
]
|
d13e11b110d873ef97f57fb197bca32e27e0d594 | # Dataset Card for "division_whole"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | jlbaker361/division_whole | [
"region:us"
]
| 2023-11-14T23:39:08+00:00 | {"dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "float64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1564205.5660336576, "num_examples": 29146}, {"name": "test", "num_bytes": 173830.43396634245, "num_examples": 3239}], "download_size": 1106099, "dataset_size": 1738036.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]} | 2023-11-15T12:59:57+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "division_whole"
More Information needed | [
"# Dataset Card for \"division_whole\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"division_whole\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"division_whole\"\n\nMore Information needed"
]
|
4669171349b6dcb4abb5ed2c84eeb09f2be19330 | # Dataset Card for "subtraction_whole"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | jlbaker361/subtraction_whole | [
"region:us"
]
| 2023-11-14T23:39:09+00:00 | {"dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "float64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1192290.3, "num_examples": 29376}, {"name": "test", "num_bytes": 132476.7, "num_examples": 3264}], "download_size": 684606, "dataset_size": 1324767.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]} | 2023-11-15T13:00:00+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "subtraction_whole"
More Information needed | [
"# Dataset Card for \"subtraction_whole\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"subtraction_whole\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"subtraction_whole\"\n\nMore Information needed"
]
|
23d5136055c690542893a0f943afa46230ac4386 | # Dataset Card for "addition_whole"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | jlbaker361/addition_whole | [
"region:us"
]
| 2023-11-14T23:39:10+00:00 | {"dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "float64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1210405.5, "num_examples": 29376}, {"name": "test", "num_bytes": 134489.5, "num_examples": 3264}], "download_size": 730504, "dataset_size": 1344895.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]} | 2023-11-15T13:00:03+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "addition_whole"
More Information needed | [
"# Dataset Card for \"addition_whole\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"addition_whole\"\n\nMore Information needed"
]
| [
6,
16
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"addition_whole\"\n\nMore Information needed"
]
|
2808fe820bc7170ebcdd74d54694ea272022f66d | # Dataset Card for "multiplication_decimal"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | jlbaker361/multiplication_decimal | [
"region:us"
]
| 2023-11-14T23:39:12+00:00 | {"dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "float64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2349837.9, "num_examples": 29376}, {"name": "test", "num_bytes": 261093.1, "num_examples": 3264}], "download_size": 1140671, "dataset_size": 2610931.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]} | 2023-11-15T13:00:10+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "multiplication_decimal"
More Information needed | [
"# Dataset Card for \"multiplication_decimal\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"multiplication_decimal\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"multiplication_decimal\"\n\nMore Information needed"
]
|
0eb81fc3e0fb2eef8917851b5fcf88aeef0e9679 | # Dataset Card for "division_decimal"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | jlbaker361/division_decimal | [
"region:us"
]
| 2023-11-14T23:39:14+00:00 | {"dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "float64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2316743.856229736, "num_examples": 29146}, {"name": "test", "num_bytes": 257460.14377026403, "num_examples": 3239}], "download_size": 1214888, "dataset_size": 2574204.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]} | 2023-11-15T13:00:13+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "division_decimal"
More Information needed | [
"# Dataset Card for \"division_decimal\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"division_decimal\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"division_decimal\"\n\nMore Information needed"
]
|
194bdbea6922b4e4d3b0d8a3b39ff5be6783df3b | # Dataset Card for "subtraction_decimal"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | jlbaker361/subtraction_decimal | [
"region:us"
]
| 2023-11-14T23:39:15+00:00 | {"dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "float64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2146287.6, "num_examples": 29376}, {"name": "test", "num_bytes": 238476.4, "num_examples": 3264}], "download_size": 848814, "dataset_size": 2384764.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]} | 2023-11-15T13:00:17+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "subtraction_decimal"
More Information needed | [
"# Dataset Card for \"subtraction_decimal\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"subtraction_decimal\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"subtraction_decimal\"\n\nMore Information needed"
]
|
4ab8dc2d460fd2c87366e48c58459e397cc12691 | # Dataset Card for "addition_decimal"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | jlbaker361/addition_decimal | [
"region:us"
]
| 2023-11-14T23:39:16+00:00 | {"dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "float64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2145709.8, "num_examples": 29376}, {"name": "test", "num_bytes": 238412.2, "num_examples": 3264}], "download_size": 884683, "dataset_size": 2384122.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]} | 2023-11-15T13:00:20+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "addition_decimal"
More Information needed | [
"# Dataset Card for \"addition_decimal\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"addition_decimal\"\n\nMore Information needed"
]
| [
6,
16
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"addition_decimal\"\n\nMore Information needed"
]
|
ae449dc4c864e4e9867d182d3342a64637d93108 | # Batch speech
TODO: add description
| zeio/batch-speech | [
"region:us"
]
| 2023-11-14T23:43:12+00:00 | {} | 2023-11-14T22:51:33+00:00 | []
| []
| TAGS
#region-us
| # Batch speech
TODO: add description
| [
"# Batch speech\n\nTODO: add description"
]
| [
"TAGS\n#region-us \n",
"# Batch speech\n\nTODO: add description"
]
| [
6,
9
]
| [
"passage: TAGS\n#region-us \n# Batch speech\n\nTODO: add description"
]
|
167212477e9da670f24e67ab6660566744b19321 | # German UltraChat
This dataset contains the first 1k prompts from [HuggingFaceH4/ultrachat_200k](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k) translated to German and run through inference with GPT-4. | bjoernp/ultrachat_de | [
"language:de",
"license:mit",
"region:us"
]
| 2023-11-14T23:47:29+00:00 | {"language": ["de"], "license": "mit", "dataset_info": {"features": [{"name": "prompt_id", "dtype": "string"}, {"name": "prompt", "dtype": "string"}, {"name": "conversations", "list": [{"name": "from", "dtype": "string"}, {"name": "value", "dtype": "string"}]}, {"name": "len_en", "dtype": "int64"}, {"name": "len_de", "dtype": "int64"}, {"name": "system_prompt", "dtype": "string"}, {"name": "response", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 5676838, "num_examples": 959}], "download_size": 3083642, "dataset_size": 5676838}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-12-02T13:07:31+00:00 | []
| [
"de"
]
| TAGS
#language-German #license-mit #region-us
| # German UltraChat
This dataset contains the first 1k prompts from HuggingFaceH4/ultrachat_200k translated to German and run through inference with GPT-4. | [
"# German UltraChat\nThis dataset contains the first 1k prompts from HuggingFaceH4/ultrachat_200k translated to German and run through inference with GPT-4."
]
| [
"TAGS\n#language-German #license-mit #region-us \n",
"# German UltraChat\nThis dataset contains the first 1k prompts from HuggingFaceH4/ultrachat_200k translated to German and inference on with GPT-4."
]
| [
15,
42
]
| [
"passage: TAGS\n#language-German #license-mit #region-us \n# German UltraChat\nThis dataset contains the first 1k prompts from HuggingFaceH4/ultrachat_200k translated to German and inference on with GPT-4."
]
|
b4d28801a708cbba4d9bc4d27803f0177cff87da | # Dataset Card for "ha-en_RL-grow2_I2_valid"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | pranjali97/ha-en_RL-grow2_I2_valid | [
"region:us"
]
| 2023-11-14T23:47:40+00:00 | {"dataset_info": {"features": [{"name": "src", "dtype": "string"}, {"name": "ref", "dtype": "string"}, {"name": "mt", "dtype": "string"}, {"name": "score", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 1427995, "num_examples": 3339}], "download_size": 378938, "dataset_size": 1427995}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-14T23:47:41+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "ha-en_RL-grow2_I2_valid"
More Information needed | [
"# Dataset Card for \"ha-en_RL-grow2_I2_valid\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"ha-en_RL-grow2_I2_valid\"\n\nMore Information needed"
]
| [
6,
25
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"ha-en_RL-grow2_I2_valid\"\n\nMore Information needed"
]
|
d2fe7572d192a007d4b6159a31c8eb3d379d9fe9 | # Dataset Card for "f4e64da0"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | result-kand2-sdxl-wuerst-karlo/f4e64da0 | [
"region:us"
]
| 2023-11-14T23:50:05+00:00 | {"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 171, "num_examples": 10}], "download_size": 1333, "dataset_size": 171}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-14T23:50:06+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "f4e64da0"
More Information needed | [
"# Dataset Card for \"f4e64da0\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"f4e64da0\"\n\nMore Information needed"
]
| [
6,
16
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"f4e64da0\"\n\nMore Information needed"
]
|
65f3ef5aef2e5033ad8c19019b370c57838a0011 | # Dataset Card for "test"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | jlbaker361/test | [
"region:us"
]
| 2023-11-15T00:26:41+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "float64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 231.7, "num_examples": 7}, {"name": "test", "num_bytes": 99.3, "num_examples": 3}], "download_size": 3607, "dataset_size": 331.0}} | 2023-11-15T00:26:42+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "test"
More Information needed | [
"# Dataset Card for \"test\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"test\"\n\nMore Information needed"
]
| [
6,
11
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"test\"\n\nMore Information needed"
]
|
eab2a6429fc5e3d74bc68c66502a215b45034cd8 |
# Dataset Card for Evaluation run of sequelbox/DaringFortitude
## Dataset Description
- **Homepage:**
- **Repository:** https://huggingface.co/sequelbox/DaringFortitude
- **Paper:**
- **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard
- **Point of Contact:** [email protected]
### Dataset Summary
Dataset automatically created during the evaluation run of model [sequelbox/DaringFortitude](https://huggingface.co/sequelbox/DaringFortitude) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).
The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks.
The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.
An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).
To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset("open-llm-leaderboard/details_sequelbox__DaringFortitude_public",
"harness_winogrande_5",
split="train")
```
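The aggregated results have their own configuration; a minimal sketch for loading it (assuming the `"results"` config name described above is exposed under the same public repo id):

```python
from datasets import load_dataset

# The "results" configuration aggregates the scores across all tasks;
# the "train" split always points to the latest run.
results = load_dataset(
    "open-llm-leaderboard/details_sequelbox__DaringFortitude_public",
    "results",
    split="train",
)
print(results[0])
```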
## Latest results
These are the [latest results from run 2023-11-15T00:35:47.431209](https://huggingface.co/datasets/open-llm-leaderboard/details_sequelbox__DaringFortitude_public/blob/main/results_2023-11-15T00-35-47.431209.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each in the results and the "latest" split for each eval):
```python
{
"all": {
"acc": 0.5932217761298214,
"acc_stderr": 0.03305656216343214,
"acc_norm": 0.6027951864354921,
"acc_norm_stderr": 0.03382034227909779,
"mc1": 0.40269277845777235,
"mc1_stderr": 0.017168830935187215,
"mc2": 0.559561930249219,
"mc2_stderr": 0.015693079433704838,
"em": 0.01950503355704698,
"em_stderr": 0.0014162361849700607,
"f1": 0.12218750000000013,
"f1_stderr": 0.002284380268622334
},
"harness|arc:challenge|25": {
"acc": 0.6032423208191127,
"acc_stderr": 0.01429651302018063,
"acc_norm": 0.6348122866894198,
"acc_norm_stderr": 0.014070265519268802
},
"harness|hellaswag|10": {
"acc": 0.6360286795459071,
"acc_stderr": 0.004801572028920796,
"acc_norm": 0.8355905198167696,
"acc_norm_stderr": 0.003698892388380099
},
"harness|hendrycksTest-abstract_algebra|5": {
"acc": 0.29,
"acc_stderr": 0.045604802157206845,
"acc_norm": 0.29,
"acc_norm_stderr": 0.045604802157206845
},
"harness|hendrycksTest-anatomy|5": {
"acc": 0.5259259259259259,
"acc_stderr": 0.04313531696750575,
"acc_norm": 0.5259259259259259,
"acc_norm_stderr": 0.04313531696750575
},
"harness|hendrycksTest-astronomy|5": {
"acc": 0.618421052631579,
"acc_stderr": 0.03953173377749194,
"acc_norm": 0.618421052631579,
"acc_norm_stderr": 0.03953173377749194
},
"harness|hendrycksTest-business_ethics|5": {
"acc": 0.57,
"acc_stderr": 0.049756985195624284,
"acc_norm": 0.57,
"acc_norm_stderr": 0.049756985195624284
},
"harness|hendrycksTest-clinical_knowledge|5": {
"acc": 0.6226415094339622,
"acc_stderr": 0.029832808114796005,
"acc_norm": 0.6226415094339622,
"acc_norm_stderr": 0.029832808114796005
},
"harness|hendrycksTest-college_biology|5": {
"acc": 0.6597222222222222,
"acc_stderr": 0.039621355734862175,
"acc_norm": 0.6597222222222222,
"acc_norm_stderr": 0.039621355734862175
},
"harness|hendrycksTest-college_chemistry|5": {
"acc": 0.41,
"acc_stderr": 0.049431107042371025,
"acc_norm": 0.41,
"acc_norm_stderr": 0.049431107042371025
},
"harness|hendrycksTest-college_computer_science|5": {
"acc": 0.55,
"acc_stderr": 0.05,
"acc_norm": 0.55,
"acc_norm_stderr": 0.05
},
"harness|hendrycksTest-college_mathematics|5": {
"acc": 0.35,
"acc_stderr": 0.047937248544110196,
"acc_norm": 0.35,
"acc_norm_stderr": 0.047937248544110196
},
"harness|hendrycksTest-college_medicine|5": {
"acc": 0.5953757225433526,
"acc_stderr": 0.03742461193887248,
"acc_norm": 0.5953757225433526,
"acc_norm_stderr": 0.03742461193887248
},
"harness|hendrycksTest-college_physics|5": {
"acc": 0.3235294117647059,
"acc_stderr": 0.04655010411319616,
"acc_norm": 0.3235294117647059,
"acc_norm_stderr": 0.04655010411319616
},
"harness|hendrycksTest-computer_security|5": {
"acc": 0.69,
"acc_stderr": 0.04648231987117316,
"acc_norm": 0.69,
"acc_norm_stderr": 0.04648231987117316
},
"harness|hendrycksTest-conceptual_physics|5": {
"acc": 0.49361702127659574,
"acc_stderr": 0.03268335899936336,
"acc_norm": 0.49361702127659574,
"acc_norm_stderr": 0.03268335899936336
},
"harness|hendrycksTest-econometrics|5": {
"acc": 0.35964912280701755,
"acc_stderr": 0.04514496132873634,
"acc_norm": 0.35964912280701755,
"acc_norm_stderr": 0.04514496132873634
},
"harness|hendrycksTest-electrical_engineering|5": {
"acc": 0.5724137931034483,
"acc_stderr": 0.04122737111370333,
"acc_norm": 0.5724137931034483,
"acc_norm_stderr": 0.04122737111370333
},
"harness|hendrycksTest-elementary_mathematics|5": {
"acc": 0.3412698412698413,
"acc_stderr": 0.024419234966819067,
"acc_norm": 0.3412698412698413,
"acc_norm_stderr": 0.024419234966819067
},
"harness|hendrycksTest-formal_logic|5": {
"acc": 0.35714285714285715,
"acc_stderr": 0.04285714285714281,
"acc_norm": 0.35714285714285715,
"acc_norm_stderr": 0.04285714285714281
},
"harness|hendrycksTest-global_facts|5": {
"acc": 0.39,
"acc_stderr": 0.04902071300001975,
"acc_norm": 0.39,
"acc_norm_stderr": 0.04902071300001975
},
"harness|hendrycksTest-high_school_biology|5": {
"acc": 0.6709677419354839,
"acc_stderr": 0.026729499068349958,
"acc_norm": 0.6709677419354839,
"acc_norm_stderr": 0.026729499068349958
},
"harness|hendrycksTest-high_school_chemistry|5": {
"acc": 0.4827586206896552,
"acc_stderr": 0.035158955511656986,
"acc_norm": 0.4827586206896552,
"acc_norm_stderr": 0.035158955511656986
},
"harness|hendrycksTest-high_school_computer_science|5": {
"acc": 0.62,
"acc_stderr": 0.04878317312145632,
"acc_norm": 0.62,
"acc_norm_stderr": 0.04878317312145632
},
"harness|hendrycksTest-high_school_european_history|5": {
"acc": 0.7212121212121212,
"acc_stderr": 0.035014387062967806,
"acc_norm": 0.7212121212121212,
"acc_norm_stderr": 0.035014387062967806
},
"harness|hendrycksTest-high_school_geography|5": {
"acc": 0.7777777777777778,
"acc_stderr": 0.029620227874790482,
"acc_norm": 0.7777777777777778,
"acc_norm_stderr": 0.029620227874790482
},
"harness|hendrycksTest-high_school_government_and_politics|5": {
"acc": 0.8808290155440415,
"acc_stderr": 0.02338193534812143,
"acc_norm": 0.8808290155440415,
"acc_norm_stderr": 0.02338193534812143
},
"harness|hendrycksTest-high_school_macroeconomics|5": {
"acc": 0.6153846153846154,
"acc_stderr": 0.024666744915187222,
"acc_norm": 0.6153846153846154,
"acc_norm_stderr": 0.024666744915187222
},
"harness|hendrycksTest-high_school_mathematics|5": {
"acc": 0.32222222222222224,
"acc_stderr": 0.028493465091028597,
"acc_norm": 0.32222222222222224,
"acc_norm_stderr": 0.028493465091028597
},
"harness|hendrycksTest-high_school_microeconomics|5": {
"acc": 0.5966386554621849,
"acc_stderr": 0.031866081214088314,
"acc_norm": 0.5966386554621849,
"acc_norm_stderr": 0.031866081214088314
},
"harness|hendrycksTest-high_school_physics|5": {
"acc": 0.32450331125827814,
"acc_stderr": 0.038227469376587525,
"acc_norm": 0.32450331125827814,
"acc_norm_stderr": 0.038227469376587525
},
"harness|hendrycksTest-high_school_psychology|5": {
"acc": 0.7889908256880734,
"acc_stderr": 0.017493922404112648,
"acc_norm": 0.7889908256880734,
"acc_norm_stderr": 0.017493922404112648
},
"harness|hendrycksTest-high_school_statistics|5": {
"acc": 0.4305555555555556,
"acc_stderr": 0.03376922151252336,
"acc_norm": 0.4305555555555556,
"acc_norm_stderr": 0.03376922151252336
},
"harness|hendrycksTest-high_school_us_history|5": {
"acc": 0.8333333333333334,
"acc_stderr": 0.02615686752393104,
"acc_norm": 0.8333333333333334,
"acc_norm_stderr": 0.02615686752393104
},
"harness|hendrycksTest-high_school_world_history|5": {
"acc": 0.7848101265822784,
"acc_stderr": 0.02675082699467617,
"acc_norm": 0.7848101265822784,
"acc_norm_stderr": 0.02675082699467617
},
"harness|hendrycksTest-human_aging|5": {
"acc": 0.6860986547085202,
"acc_stderr": 0.03114679648297246,
"acc_norm": 0.6860986547085202,
"acc_norm_stderr": 0.03114679648297246
},
"harness|hendrycksTest-human_sexuality|5": {
"acc": 0.6870229007633588,
"acc_stderr": 0.04066962905677698,
"acc_norm": 0.6870229007633588,
"acc_norm_stderr": 0.04066962905677698
},
"harness|hendrycksTest-international_law|5": {
"acc": 0.7272727272727273,
"acc_stderr": 0.04065578140908706,
"acc_norm": 0.7272727272727273,
"acc_norm_stderr": 0.04065578140908706
},
"harness|hendrycksTest-jurisprudence|5": {
"acc": 0.7685185185185185,
"acc_stderr": 0.04077494709252627,
"acc_norm": 0.7685185185185185,
"acc_norm_stderr": 0.04077494709252627
},
"harness|hendrycksTest-logical_fallacies|5": {
"acc": 0.6932515337423313,
"acc_stderr": 0.03623089915724146,
"acc_norm": 0.6932515337423313,
"acc_norm_stderr": 0.03623089915724146
},
"harness|hendrycksTest-machine_learning|5": {
"acc": 0.39285714285714285,
"acc_stderr": 0.04635550135609976,
"acc_norm": 0.39285714285714285,
"acc_norm_stderr": 0.04635550135609976
},
"harness|hendrycksTest-management|5": {
"acc": 0.7572815533980582,
"acc_stderr": 0.04245022486384495,
"acc_norm": 0.7572815533980582,
"acc_norm_stderr": 0.04245022486384495
},
"harness|hendrycksTest-marketing|5": {
"acc": 0.8504273504273504,
"acc_stderr": 0.02336505149175372,
"acc_norm": 0.8504273504273504,
"acc_norm_stderr": 0.02336505149175372
},
"harness|hendrycksTest-medical_genetics|5": {
"acc": 0.6,
"acc_stderr": 0.04923659639173309,
"acc_norm": 0.6,
"acc_norm_stderr": 0.04923659639173309
},
"harness|hendrycksTest-miscellaneous|5": {
"acc": 0.7956577266922095,
"acc_stderr": 0.0144191239809319,
"acc_norm": 0.7956577266922095,
"acc_norm_stderr": 0.0144191239809319
},
"harness|hendrycksTest-moral_disputes|5": {
"acc": 0.6560693641618497,
"acc_stderr": 0.02557412378654667,
"acc_norm": 0.6560693641618497,
"acc_norm_stderr": 0.02557412378654667
},
"harness|hendrycksTest-moral_scenarios|5": {
"acc": 0.48268156424581005,
"acc_stderr": 0.01671246744170252,
"acc_norm": 0.48268156424581005,
"acc_norm_stderr": 0.01671246744170252
},
"harness|hendrycksTest-nutrition|5": {
"acc": 0.6699346405228758,
"acc_stderr": 0.026925654653615693,
"acc_norm": 0.6699346405228758,
"acc_norm_stderr": 0.026925654653615693
},
"harness|hendrycksTest-philosophy|5": {
"acc": 0.684887459807074,
"acc_stderr": 0.026385273703464492,
"acc_norm": 0.684887459807074,
"acc_norm_stderr": 0.026385273703464492
},
"harness|hendrycksTest-prehistory|5": {
"acc": 0.7191358024691358,
"acc_stderr": 0.025006469755799208,
"acc_norm": 0.7191358024691358,
"acc_norm_stderr": 0.025006469755799208
},
"harness|hendrycksTest-professional_accounting|5": {
"acc": 0.475177304964539,
"acc_stderr": 0.02979071924382972,
"acc_norm": 0.475177304964539,
"acc_norm_stderr": 0.02979071924382972
},
"harness|hendrycksTest-professional_law|5": {
"acc": 0.45827900912646674,
"acc_stderr": 0.012725701656953642,
"acc_norm": 0.45827900912646674,
"acc_norm_stderr": 0.012725701656953642
},
"harness|hendrycksTest-professional_medicine|5": {
"acc": 0.6102941176470589,
"acc_stderr": 0.0296246635811597,
"acc_norm": 0.6102941176470589,
"acc_norm_stderr": 0.0296246635811597
},
"harness|hendrycksTest-professional_psychology|5": {
"acc": 0.5915032679738562,
"acc_stderr": 0.01988622103750187,
"acc_norm": 0.5915032679738562,
"acc_norm_stderr": 0.01988622103750187
},
"harness|hendrycksTest-public_relations|5": {
"acc": 0.6636363636363637,
"acc_stderr": 0.04525393596302505,
"acc_norm": 0.6636363636363637,
"acc_norm_stderr": 0.04525393596302505
},
"harness|hendrycksTest-security_studies|5": {
"acc": 0.673469387755102,
"acc_stderr": 0.03002105623844031,
"acc_norm": 0.673469387755102,
"acc_norm_stderr": 0.03002105623844031
},
"harness|hendrycksTest-sociology|5": {
"acc": 0.7562189054726368,
"acc_stderr": 0.030360490154014645,
"acc_norm": 0.7562189054726368,
"acc_norm_stderr": 0.030360490154014645
},
"harness|hendrycksTest-us_foreign_policy|5": {
"acc": 0.83,
"acc_stderr": 0.0377525168068637,
"acc_norm": 0.83,
"acc_norm_stderr": 0.0377525168068637
},
"harness|hendrycksTest-virology|5": {
"acc": 0.4939759036144578,
"acc_stderr": 0.03892212195333045,
"acc_norm": 0.4939759036144578,
"acc_norm_stderr": 0.03892212195333045
},
"harness|hendrycksTest-world_religions|5": {
"acc": 0.8011695906432749,
"acc_stderr": 0.030611116557432528,
"acc_norm": 0.8011695906432749,
"acc_norm_stderr": 0.030611116557432528
},
"harness|truthfulqa:mc|0": {
"mc1": 0.40269277845777235,
"mc1_stderr": 0.017168830935187215,
"mc2": 0.559561930249219,
"mc2_stderr": 0.015693079433704838
},
"harness|winogrande|5": {
"acc": 0.7647987371744278,
"acc_stderr": 0.011920008163650865
},
"harness|drop|3": {
"em": 0.01950503355704698,
"em_stderr": 0.0014162361849700607,
"f1": 0.12218750000000013,
"f1_stderr": 0.002284380268622334
},
"harness|gsm8k|5": {
"acc": 0.08794541319181198,
"acc_stderr": 0.007801162197487721
}
}
```
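If you prefer the raw JSON file linked above, a minimal sketch using `huggingface_hub` (repo id and filename are taken from the link; the exact nesting of the score dictionary in the file is an assumption based on the excerpt above):

```python
import json

from huggingface_hub import hf_hub_download

# Download the results file for this run from the dataset repo.
path = hf_hub_download(
    repo_id="open-llm-leaderboard/details_sequelbox__DaringFortitude_public",
    filename="results_2023-11-15T00-35-47.431209.json",
    repo_type="dataset",
)
with open(path) as f:
    data = json.load(f)

# Some dumps nest the per-task scores under a "results" key;
# fall back to the top level, which matches the excerpt above.
scores = data.get("results", data)
print(scores["all"]["acc"])
```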
### Supported Tasks and Leaderboards
[More Information Needed]
### Languages
[More Information Needed]
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
[More Information Needed]
### Contributions
[More Information Needed] | open-llm-leaderboard/details_sequelbox__DaringFortitude | [
"region:us"
]
| 2023-11-15T00:38:54+00:00 | {"pretty_name": "Evaluation run of sequelbox/DaringFortitude", "dataset_summary": "Dataset automatically created during the evaluation run of model [sequelbox/DaringFortitude](https://huggingface.co/sequelbox/DaringFortitude) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_sequelbox__DaringFortitude_public\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-11-15T00:35:47.431209](https://huggingface.co/datasets/open-llm-leaderboard/details_sequelbox__DaringFortitude_public/blob/main/results_2023-11-15T00-35-47.431209.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.5932217761298214,\n \"acc_stderr\": 0.03305656216343214,\n \"acc_norm\": 0.6027951864354921,\n \"acc_norm_stderr\": 0.03382034227909779,\n \"mc1\": 0.40269277845777235,\n \"mc1_stderr\": 0.017168830935187215,\n \"mc2\": 0.559561930249219,\n \"mc2_stderr\": 0.015693079433704838,\n \"em\": 0.01950503355704698,\n \"em_stderr\": 0.0014162361849700607,\n \"f1\": 0.12218750000000013,\n \"f1_stderr\": 0.002284380268622334\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.6032423208191127,\n \"acc_stderr\": 0.01429651302018063,\n \"acc_norm\": 0.6348122866894198,\n \"acc_norm_stderr\": 0.014070265519268802\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6360286795459071,\n \"acc_stderr\": 0.004801572028920796,\n \"acc_norm\": 0.8355905198167696,\n \"acc_norm_stderr\": 0.003698892388380099\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.29,\n \"acc_stderr\": 0.045604802157206845,\n \"acc_norm\": 0.29,\n \"acc_norm_stderr\": 0.045604802157206845\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.5259259259259259,\n \"acc_stderr\": 0.04313531696750575,\n \"acc_norm\": 0.5259259259259259,\n \"acc_norm_stderr\": 0.04313531696750575\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.618421052631579,\n \"acc_stderr\": 0.03953173377749194,\n \"acc_norm\": 0.618421052631579,\n \"acc_norm_stderr\": 0.03953173377749194\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.57,\n \"acc_stderr\": 0.049756985195624284,\n \"acc_norm\": 0.57,\n \"acc_norm_stderr\": 0.049756985195624284\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.6226415094339622,\n \"acc_stderr\": 0.029832808114796005,\n \"acc_norm\": 0.6226415094339622,\n \"acc_norm_stderr\": 0.029832808114796005\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.6597222222222222,\n \"acc_stderr\": 0.039621355734862175,\n \"acc_norm\": 
0.6597222222222222,\n \"acc_norm_stderr\": 0.039621355734862175\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.41,\n \"acc_stderr\": 0.049431107042371025,\n \"acc_norm\": 0.41,\n \"acc_norm_stderr\": 0.049431107042371025\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.55,\n \"acc_stderr\": 0.05,\n \"acc_norm\": 0.55,\n \"acc_norm_stderr\": 0.05\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.35,\n \"acc_stderr\": 0.047937248544110196,\n \"acc_norm\": 0.35,\n \"acc_norm_stderr\": 0.047937248544110196\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.5953757225433526,\n \"acc_stderr\": 0.03742461193887248,\n \"acc_norm\": 0.5953757225433526,\n \"acc_norm_stderr\": 0.03742461193887248\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.3235294117647059,\n \"acc_stderr\": 0.04655010411319616,\n \"acc_norm\": 0.3235294117647059,\n \"acc_norm_stderr\": 0.04655010411319616\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.69,\n \"acc_stderr\": 0.04648231987117316,\n \"acc_norm\": 0.69,\n \"acc_norm_stderr\": 0.04648231987117316\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.49361702127659574,\n \"acc_stderr\": 0.03268335899936336,\n \"acc_norm\": 0.49361702127659574,\n \"acc_norm_stderr\": 0.03268335899936336\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.35964912280701755,\n \"acc_stderr\": 0.04514496132873634,\n \"acc_norm\": 0.35964912280701755,\n \"acc_norm_stderr\": 0.04514496132873634\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.5724137931034483,\n \"acc_stderr\": 0.04122737111370333,\n \"acc_norm\": 0.5724137931034483,\n \"acc_norm_stderr\": 0.04122737111370333\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.3412698412698413,\n \"acc_stderr\": 0.024419234966819067,\n \"acc_norm\": 0.3412698412698413,\n \"acc_norm_stderr\": 0.024419234966819067\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.35714285714285715,\n \"acc_stderr\": 0.04285714285714281,\n \"acc_norm\": 0.35714285714285715,\n \"acc_norm_stderr\": 0.04285714285714281\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.39,\n \"acc_stderr\": 0.04902071300001975,\n \"acc_norm\": 0.39,\n \"acc_norm_stderr\": 0.04902071300001975\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.6709677419354839,\n \"acc_stderr\": 0.026729499068349958,\n \"acc_norm\": 0.6709677419354839,\n \"acc_norm_stderr\": 0.026729499068349958\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.4827586206896552,\n \"acc_stderr\": 0.035158955511656986,\n \"acc_norm\": 0.4827586206896552,\n \"acc_norm_stderr\": 0.035158955511656986\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.62,\n \"acc_stderr\": 0.04878317312145632,\n \"acc_norm\": 0.62,\n \"acc_norm_stderr\": 0.04878317312145632\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.7212121212121212,\n \"acc_stderr\": 0.035014387062967806,\n \"acc_norm\": 0.7212121212121212,\n \"acc_norm_stderr\": 0.035014387062967806\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.7777777777777778,\n \"acc_stderr\": 0.029620227874790482,\n \"acc_norm\": 0.7777777777777778,\n \"acc_norm_stderr\": 0.029620227874790482\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.8808290155440415,\n \"acc_stderr\": 0.02338193534812143,\n 
\"acc_norm\": 0.8808290155440415,\n \"acc_norm_stderr\": 0.02338193534812143\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.6153846153846154,\n \"acc_stderr\": 0.024666744915187222,\n \"acc_norm\": 0.6153846153846154,\n \"acc_norm_stderr\": 0.024666744915187222\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.32222222222222224,\n \"acc_stderr\": 0.028493465091028597,\n \"acc_norm\": 0.32222222222222224,\n \"acc_norm_stderr\": 0.028493465091028597\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.5966386554621849,\n \"acc_stderr\": 0.031866081214088314,\n \"acc_norm\": 0.5966386554621849,\n \"acc_norm_stderr\": 0.031866081214088314\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.32450331125827814,\n \"acc_stderr\": 0.038227469376587525,\n \"acc_norm\": 0.32450331125827814,\n \"acc_norm_stderr\": 0.038227469376587525\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.7889908256880734,\n \"acc_stderr\": 0.017493922404112648,\n \"acc_norm\": 0.7889908256880734,\n \"acc_norm_stderr\": 0.017493922404112648\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.4305555555555556,\n \"acc_stderr\": 0.03376922151252336,\n \"acc_norm\": 0.4305555555555556,\n \"acc_norm_stderr\": 0.03376922151252336\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.8333333333333334,\n \"acc_stderr\": 0.02615686752393104,\n \"acc_norm\": 0.8333333333333334,\n \"acc_norm_stderr\": 0.02615686752393104\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.7848101265822784,\n \"acc_stderr\": 0.02675082699467617,\n \"acc_norm\": 0.7848101265822784,\n \"acc_norm_stderr\": 0.02675082699467617\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.6860986547085202,\n \"acc_stderr\": 0.03114679648297246,\n \"acc_norm\": 0.6860986547085202,\n \"acc_norm_stderr\": 0.03114679648297246\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.6870229007633588,\n \"acc_stderr\": 0.04066962905677698,\n \"acc_norm\": 0.6870229007633588,\n \"acc_norm_stderr\": 0.04066962905677698\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.7272727272727273,\n \"acc_stderr\": 0.04065578140908706,\n \"acc_norm\": 0.7272727272727273,\n \"acc_norm_stderr\": 0.04065578140908706\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.7685185185185185,\n \"acc_stderr\": 0.04077494709252627,\n \"acc_norm\": 0.7685185185185185,\n \"acc_norm_stderr\": 0.04077494709252627\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.6932515337423313,\n \"acc_stderr\": 0.03623089915724146,\n \"acc_norm\": 0.6932515337423313,\n \"acc_norm_stderr\": 0.03623089915724146\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.39285714285714285,\n \"acc_stderr\": 0.04635550135609976,\n \"acc_norm\": 0.39285714285714285,\n \"acc_norm_stderr\": 0.04635550135609976\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.7572815533980582,\n \"acc_stderr\": 0.04245022486384495,\n \"acc_norm\": 0.7572815533980582,\n \"acc_norm_stderr\": 0.04245022486384495\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.8504273504273504,\n \"acc_stderr\": 0.02336505149175372,\n \"acc_norm\": 0.8504273504273504,\n \"acc_norm_stderr\": 0.02336505149175372\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.6,\n \"acc_stderr\": 0.04923659639173309,\n \"acc_norm\": 0.6,\n \"acc_norm_stderr\": 
0.04923659639173309\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.7956577266922095,\n \"acc_stderr\": 0.0144191239809319,\n \"acc_norm\": 0.7956577266922095,\n \"acc_norm_stderr\": 0.0144191239809319\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.6560693641618497,\n \"acc_stderr\": 0.02557412378654667,\n \"acc_norm\": 0.6560693641618497,\n \"acc_norm_stderr\": 0.02557412378654667\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.48268156424581005,\n \"acc_stderr\": 0.01671246744170252,\n \"acc_norm\": 0.48268156424581005,\n \"acc_norm_stderr\": 0.01671246744170252\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.6699346405228758,\n \"acc_stderr\": 0.026925654653615693,\n \"acc_norm\": 0.6699346405228758,\n \"acc_norm_stderr\": 0.026925654653615693\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.684887459807074,\n \"acc_stderr\": 0.026385273703464492,\n \"acc_norm\": 0.684887459807074,\n \"acc_norm_stderr\": 0.026385273703464492\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.7191358024691358,\n \"acc_stderr\": 0.025006469755799208,\n \"acc_norm\": 0.7191358024691358,\n \"acc_norm_stderr\": 0.025006469755799208\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.475177304964539,\n \"acc_stderr\": 0.02979071924382972,\n \"acc_norm\": 0.475177304964539,\n \"acc_norm_stderr\": 0.02979071924382972\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.45827900912646674,\n \"acc_stderr\": 0.012725701656953642,\n \"acc_norm\": 0.45827900912646674,\n \"acc_norm_stderr\": 0.012725701656953642\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.6102941176470589,\n \"acc_stderr\": 0.0296246635811597,\n \"acc_norm\": 0.6102941176470589,\n \"acc_norm_stderr\": 0.0296246635811597\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.5915032679738562,\n \"acc_stderr\": 0.01988622103750187,\n \"acc_norm\": 0.5915032679738562,\n \"acc_norm_stderr\": 0.01988622103750187\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.6636363636363637,\n \"acc_stderr\": 0.04525393596302505,\n \"acc_norm\": 0.6636363636363637,\n \"acc_norm_stderr\": 0.04525393596302505\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.673469387755102,\n \"acc_stderr\": 0.03002105623844031,\n \"acc_norm\": 0.673469387755102,\n \"acc_norm_stderr\": 0.03002105623844031\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.7562189054726368,\n \"acc_stderr\": 0.030360490154014645,\n \"acc_norm\": 0.7562189054726368,\n \"acc_norm_stderr\": 0.030360490154014645\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.83,\n \"acc_stderr\": 0.0377525168068637,\n \"acc_norm\": 0.83,\n \"acc_norm_stderr\": 0.0377525168068637\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.4939759036144578,\n \"acc_stderr\": 0.03892212195333045,\n \"acc_norm\": 0.4939759036144578,\n \"acc_norm_stderr\": 0.03892212195333045\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.8011695906432749,\n \"acc_stderr\": 0.030611116557432528,\n \"acc_norm\": 0.8011695906432749,\n \"acc_norm_stderr\": 0.030611116557432528\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.40269277845777235,\n \"mc1_stderr\": 0.017168830935187215,\n \"mc2\": 0.559561930249219,\n \"mc2_stderr\": 0.015693079433704838\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7647987371744278,\n \"acc_stderr\": 0.011920008163650865\n },\n \"harness|drop|3\": {\n 
\"em\": 0.01950503355704698,\n \"em_stderr\": 0.0014162361849700607,\n \"f1\": 0.12218750000000013,\n \"f1_stderr\": 0.002284380268622334\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.08794541319181198,\n \"acc_stderr\": 0.007801162197487721\n }\n}\n```", "repo_url": "https://huggingface.co/sequelbox/DaringFortitude", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|arc:challenge|25_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_drop_3", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|drop|3_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|gsm8k|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hellaswag|10_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-15T00-35-47.431209.parquet", 
"**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-management|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-virology|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-15T00-35-47.431209.parquet", 
"**/details_harness|hendrycksTest-anatomy|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-management|5_2023-11-15T00-35-47.431209.parquet", 
"**/details_harness|hendrycksTest-marketing|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-virology|5_2023-11-15T00-35-47.431209.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-college_biology|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-15T00-35-47.431209.parquet"]}]}, 
{"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-jurisprudence|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-management|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": 
["**/details_harness|hendrycksTest-prehistory|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-virology|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-world_religions|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|truthfulqa:mc|0_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["**/details_harness|winogrande|5_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-11-15T00-35-47.431209.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_11_15T00_35_47.431209", "path": ["results_2023-11-15T00-35-47.431209.parquet"]}, {"split": "latest", "path": ["results_2023-11-15T00-35-47.431209.parquet"]}]}]} | 2023-11-15T00:39:41+00:00 | []
| []
| TAGS
#region-us
|
# Dataset Card for Evaluation run of sequelbox/DaringFortitude
## Dataset Description
- Homepage:
- Repository: URL
- Paper:
- Leaderboard: URL
- Point of Contact: clementine@URL
### Dataset Summary
Dataset automatically created during the evaluation run of model sequelbox/DaringFortitude on the Open LLM Leaderboard.
The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks.
The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.
An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).
To load the details from a run, you can for instance do the following:
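A minimal sketch, assuming the details repository follows the `open-llm-leaderboard/details_<org>__<model>` naming used by the other cards in this collection and one of the per-task configs listed in the metadata above:
```python
from datasets import load_dataset

# Load the per-sample details for one task; "train" points at the latest run.
# The repo id and config name are inferred from the metadata and naming pattern.
data = load_dataset("open-llm-leaderboard/details_sequelbox__DaringFortitude",
	"harness_winogrande_5",
	split="train")
```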
## Latest results
These are the latest results from run 2023-11-15T00:35:47.431209 (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks. You can find each one in the results and the "latest" split for each eval):
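The aggregated numbers can be pulled the same way; a sketch, assuming the `results` config and its `latest` split declared in the metadata above (the repo id is again inferred from the naming pattern):
```python
from datasets import load_dataset

# "latest" always resolves to the most recent results parquet for this model.
results = load_dataset("open-llm-leaderboard/details_sequelbox__DaringFortitude",
	"results",
	split="latest")
print(results[0])  # a single row holding the aggregated metrics
```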
### Supported Tasks and Leaderboards
### Languages
## Dataset Structure
### Data Instances
### Data Fields
### Data Splits
## Dataset Creation
### Curation Rationale
### Source Data
#### Initial Data Collection and Normalization
#### Who are the source language producers?
### Annotations
#### Annotation process
#### Who are the annotators?
### Personal and Sensitive Information
## Considerations for Using the Data
### Social Impact of Dataset
### Discussion of Biases
### Other Known Limitations
## Additional Information
### Dataset Curators
### Licensing Information
### Contributions
| [
"# Dataset Card for Evaluation run of sequelbox/DaringFortitude",
"## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL",
"### Dataset Summary\n\nDataset automatically created during the evaluation run of model sequelbox/DaringFortitude on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:",
"## Latest results\n\nThese are the latest results from run 2023-11-15T00:35:47.431209(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):",
"### Supported Tasks and Leaderboards",
"### Languages",
"## Dataset Structure",
"### Data Instances",
"### Data Fields",
"### Data Splits",
"## Dataset Creation",
"### Curation Rationale",
"### Source Data",
"#### Initial Data Collection and Normalization",
"#### Who are the source language producers?",
"### Annotations",
"#### Annotation process",
"#### Who are the annotators?",
"### Personal and Sensitive Information",
"## Considerations for Using the Data",
"### Social Impact of Dataset",
"### Discussion of Biases",
"### Other Known Limitations",
"## Additional Information",
"### Dataset Curators",
"### Licensing Information",
"### Contributions"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for Evaluation run of sequelbox/DaringFortitude",
"## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL",
"### Dataset Summary\n\nDataset automatically created during the evaluation run of model sequelbox/DaringFortitude on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:",
"## Latest results\n\nThese are the latest results from run 2023-11-15T00:35:47.431209(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):",
"### Supported Tasks and Leaderboards",
"### Languages",
"## Dataset Structure",
"### Data Instances",
"### Data Fields",
"### Data Splits",
"## Dataset Creation",
"### Curation Rationale",
"### Source Data",
"#### Initial Data Collection and Normalization",
"#### Who are the source language producers?",
"### Annotations",
"#### Annotation process",
"#### Who are the annotators?",
"### Personal and Sensitive Information",
"## Considerations for Using the Data",
"### Social Impact of Dataset",
"### Discussion of Biases",
"### Other Known Limitations",
"## Additional Information",
"### Dataset Curators",
"### Licensing Information",
"### Contributions"
]
| [
6,
17,
31,
166,
67,
10,
4,
6,
6,
5,
5,
5,
7,
4,
10,
10,
5,
5,
9,
8,
8,
7,
8,
7,
5,
6,
6,
5
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of sequelbox/DaringFortitude## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model sequelbox/DaringFortitude on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-11-15T00:35:47.431209(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions"
]
|
ab9f808fe91c58eb91301e2c4660d080be30413c |
# Dataset Card for Evaluation run of ValiantLabs/ShiningValiantXS
<!-- Provide a quick summary of the dataset. -->
Dataset automatically created during the evaluation run of model [ValiantLabs/ShiningValiantXS](https://huggingface.co/ValiantLabs/ShiningValiantXS) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).
The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks.

The dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.

An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).
To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset("open-llm-leaderboard/details_ValiantLabs__ShiningValiantXS",
"harness_winogrande_5",
split="train")
```
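To see which per-task configs are available before loading one, the standard `datasets` helper below should work on this repo as well (a sketch; the exact list and ordering may differ):
```python
from datasets import get_dataset_config_names

# Enumerate every config, e.g. "harness_arc_challenge_25", ..., "results".
configs = get_dataset_config_names("open-llm-leaderboard/details_ValiantLabs__ShiningValiantXS")
print(len(configs), configs[:5])
```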
## Latest results
These are the [latest results from run 2024-01-13T18:20:38.822365](https://huggingface.co/datasets/open-llm-leaderboard/details_ValiantLabs__ShiningValiantXS/blob/main/results_2024-01-13T18-20-38.822365.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks. You can find each one in the results and the "latest" split for each eval):
```python
{
"all": {
"acc": 0.5675568851858357,
"acc_stderr": 0.033390996224643595,
"acc_norm": 0.5721774824296487,
"acc_norm_stderr": 0.034080917555585837,
"mc1": 0.33414932680538556,
"mc1_stderr": 0.016512530677150538,
"mc2": 0.48702658726620335,
"mc2_stderr": 0.014839126920436898
},
"harness|arc:challenge|25": {
"acc": 0.5477815699658704,
"acc_stderr": 0.01454451988063383,
"acc_norm": 0.5895904436860068,
"acc_norm_stderr": 0.014374922192642664
},
"harness|hellaswag|10": {
"acc": 0.6136227843059151,
"acc_stderr": 0.004859236191579797,
"acc_norm": 0.819259111730731,
"acc_norm_stderr": 0.003840169224012275
},
"harness|hendrycksTest-abstract_algebra|5": {
"acc": 0.33,
"acc_stderr": 0.04725815626252606,
"acc_norm": 0.33,
"acc_norm_stderr": 0.04725815626252606
},
"harness|hendrycksTest-anatomy|5": {
"acc": 0.4962962962962963,
"acc_stderr": 0.04319223625811331,
"acc_norm": 0.4962962962962963,
"acc_norm_stderr": 0.04319223625811331
},
"harness|hendrycksTest-astronomy|5": {
"acc": 0.5657894736842105,
"acc_stderr": 0.04033565667848319,
"acc_norm": 0.5657894736842105,
"acc_norm_stderr": 0.04033565667848319
},
"harness|hendrycksTest-business_ethics|5": {
"acc": 0.55,
"acc_stderr": 0.049999999999999996,
"acc_norm": 0.55,
"acc_norm_stderr": 0.049999999999999996
},
"harness|hendrycksTest-clinical_knowledge|5": {
"acc": 0.630188679245283,
"acc_stderr": 0.029711421880107936,
"acc_norm": 0.630188679245283,
"acc_norm_stderr": 0.029711421880107936
},
"harness|hendrycksTest-college_biology|5": {
"acc": 0.625,
"acc_stderr": 0.04048439222695598,
"acc_norm": 0.625,
"acc_norm_stderr": 0.04048439222695598
},
"harness|hendrycksTest-college_chemistry|5": {
"acc": 0.4,
"acc_stderr": 0.04923659639173309,
"acc_norm": 0.4,
"acc_norm_stderr": 0.04923659639173309
},
"harness|hendrycksTest-college_computer_science|5": {
"acc": 0.46,
"acc_stderr": 0.05009082659620332,
"acc_norm": 0.46,
"acc_norm_stderr": 0.05009082659620332
},
"harness|hendrycksTest-college_mathematics|5": {
"acc": 0.31,
"acc_stderr": 0.04648231987117316,
"acc_norm": 0.31,
"acc_norm_stderr": 0.04648231987117316
},
"harness|hendrycksTest-college_medicine|5": {
"acc": 0.5260115606936416,
"acc_stderr": 0.038073017265045125,
"acc_norm": 0.5260115606936416,
"acc_norm_stderr": 0.038073017265045125
},
"harness|hendrycksTest-college_physics|5": {
"acc": 0.2549019607843137,
"acc_stderr": 0.04336432707993179,
"acc_norm": 0.2549019607843137,
"acc_norm_stderr": 0.04336432707993179
},
"harness|hendrycksTest-computer_security|5": {
"acc": 0.7,
"acc_stderr": 0.046056618647183814,
"acc_norm": 0.7,
"acc_norm_stderr": 0.046056618647183814
},
"harness|hendrycksTest-conceptual_physics|5": {
"acc": 0.42127659574468085,
"acc_stderr": 0.03227834510146268,
"acc_norm": 0.42127659574468085,
"acc_norm_stderr": 0.03227834510146268
},
"harness|hendrycksTest-econometrics|5": {
"acc": 0.3157894736842105,
"acc_stderr": 0.04372748290278007,
"acc_norm": 0.3157894736842105,
"acc_norm_stderr": 0.04372748290278007
},
"harness|hendrycksTest-electrical_engineering|5": {
"acc": 0.5448275862068965,
"acc_stderr": 0.04149886942192118,
"acc_norm": 0.5448275862068965,
"acc_norm_stderr": 0.04149886942192118
},
"harness|hendrycksTest-elementary_mathematics|5": {
"acc": 0.34656084656084657,
"acc_stderr": 0.024508777521028428,
"acc_norm": 0.34656084656084657,
"acc_norm_stderr": 0.024508777521028428
},
"harness|hendrycksTest-formal_logic|5": {
"acc": 0.3253968253968254,
"acc_stderr": 0.041905964388711366,
"acc_norm": 0.3253968253968254,
"acc_norm_stderr": 0.041905964388711366
},
"harness|hendrycksTest-global_facts|5": {
"acc": 0.34,
"acc_stderr": 0.04760952285695236,
"acc_norm": 0.34,
"acc_norm_stderr": 0.04760952285695236
},
"harness|hendrycksTest-high_school_biology|5": {
"acc": 0.6451612903225806,
"acc_stderr": 0.02721888977330877,
"acc_norm": 0.6451612903225806,
"acc_norm_stderr": 0.02721888977330877
},
"harness|hendrycksTest-high_school_chemistry|5": {
"acc": 0.45320197044334976,
"acc_stderr": 0.03502544650845872,
"acc_norm": 0.45320197044334976,
"acc_norm_stderr": 0.03502544650845872
},
"harness|hendrycksTest-high_school_computer_science|5": {
"acc": 0.56,
"acc_stderr": 0.04988876515698589,
"acc_norm": 0.56,
"acc_norm_stderr": 0.04988876515698589
},
"harness|hendrycksTest-high_school_european_history|5": {
"acc": 0.6787878787878788,
"acc_stderr": 0.036462049632538115,
"acc_norm": 0.6787878787878788,
"acc_norm_stderr": 0.036462049632538115
},
"harness|hendrycksTest-high_school_geography|5": {
"acc": 0.7474747474747475,
"acc_stderr": 0.030954055470365914,
"acc_norm": 0.7474747474747475,
"acc_norm_stderr": 0.030954055470365914
},
"harness|hendrycksTest-high_school_government_and_politics|5": {
"acc": 0.8290155440414507,
"acc_stderr": 0.027171213683164542,
"acc_norm": 0.8290155440414507,
"acc_norm_stderr": 0.027171213683164542
},
"harness|hendrycksTest-high_school_macroeconomics|5": {
"acc": 0.5435897435897435,
"acc_stderr": 0.025254485424799605,
"acc_norm": 0.5435897435897435,
"acc_norm_stderr": 0.025254485424799605
},
"harness|hendrycksTest-high_school_mathematics|5": {
"acc": 0.3074074074074074,
"acc_stderr": 0.028133252578815635,
"acc_norm": 0.3074074074074074,
"acc_norm_stderr": 0.028133252578815635
},
"harness|hendrycksTest-high_school_microeconomics|5": {
"acc": 0.5336134453781513,
"acc_stderr": 0.03240501447690071,
"acc_norm": 0.5336134453781513,
"acc_norm_stderr": 0.03240501447690071
},
"harness|hendrycksTest-high_school_physics|5": {
"acc": 0.2781456953642384,
"acc_stderr": 0.03658603262763743,
"acc_norm": 0.2781456953642384,
"acc_norm_stderr": 0.03658603262763743
},
"harness|hendrycksTest-high_school_psychology|5": {
"acc": 0.7596330275229358,
"acc_stderr": 0.01832060732096407,
"acc_norm": 0.7596330275229358,
"acc_norm_stderr": 0.01832060732096407
},
"harness|hendrycksTest-high_school_statistics|5": {
"acc": 0.4583333333333333,
"acc_stderr": 0.03398110890294636,
"acc_norm": 0.4583333333333333,
"acc_norm_stderr": 0.03398110890294636
},
"harness|hendrycksTest-high_school_us_history|5": {
"acc": 0.7941176470588235,
"acc_stderr": 0.028379449451588663,
"acc_norm": 0.7941176470588235,
"acc_norm_stderr": 0.028379449451588663
},
"harness|hendrycksTest-high_school_world_history|5": {
"acc": 0.7763713080168776,
"acc_stderr": 0.027123298205229966,
"acc_norm": 0.7763713080168776,
"acc_norm_stderr": 0.027123298205229966
},
"harness|hendrycksTest-human_aging|5": {
"acc": 0.7085201793721974,
"acc_stderr": 0.03050028317654585,
"acc_norm": 0.7085201793721974,
"acc_norm_stderr": 0.03050028317654585
},
"harness|hendrycksTest-human_sexuality|5": {
"acc": 0.6641221374045801,
"acc_stderr": 0.041423137719966634,
"acc_norm": 0.6641221374045801,
"acc_norm_stderr": 0.041423137719966634
},
"harness|hendrycksTest-international_law|5": {
"acc": 0.743801652892562,
"acc_stderr": 0.03984979653302872,
"acc_norm": 0.743801652892562,
"acc_norm_stderr": 0.03984979653302872
},
"harness|hendrycksTest-jurisprudence|5": {
"acc": 0.7592592592592593,
"acc_stderr": 0.04133119440243839,
"acc_norm": 0.7592592592592593,
"acc_norm_stderr": 0.04133119440243839
},
"harness|hendrycksTest-logical_fallacies|5": {
"acc": 0.7116564417177914,
"acc_stderr": 0.035590395316173425,
"acc_norm": 0.7116564417177914,
"acc_norm_stderr": 0.035590395316173425
},
"harness|hendrycksTest-machine_learning|5": {
"acc": 0.36607142857142855,
"acc_stderr": 0.045723723587374296,
"acc_norm": 0.36607142857142855,
"acc_norm_stderr": 0.045723723587374296
},
"harness|hendrycksTest-management|5": {
"acc": 0.6699029126213593,
"acc_stderr": 0.0465614711001235,
"acc_norm": 0.6699029126213593,
"acc_norm_stderr": 0.0465614711001235
},
"harness|hendrycksTest-marketing|5": {
"acc": 0.7991452991452992,
"acc_stderr": 0.02624677294689048,
"acc_norm": 0.7991452991452992,
"acc_norm_stderr": 0.02624677294689048
},
"harness|hendrycksTest-medical_genetics|5": {
"acc": 0.58,
"acc_stderr": 0.04960449637488583,
"acc_norm": 0.58,
"acc_norm_stderr": 0.04960449637488583
},
"harness|hendrycksTest-miscellaneous|5": {
"acc": 0.7803320561941252,
"acc_stderr": 0.014805384478371155,
"acc_norm": 0.7803320561941252,
"acc_norm_stderr": 0.014805384478371155
},
"harness|hendrycksTest-moral_disputes|5": {
"acc": 0.653179190751445,
"acc_stderr": 0.025624723994030457,
"acc_norm": 0.653179190751445,
"acc_norm_stderr": 0.025624723994030457
},
"harness|hendrycksTest-moral_scenarios|5": {
"acc": 0.3474860335195531,
"acc_stderr": 0.01592556406020815,
"acc_norm": 0.3474860335195531,
"acc_norm_stderr": 0.01592556406020815
},
"harness|hendrycksTest-nutrition|5": {
"acc": 0.6339869281045751,
"acc_stderr": 0.027582811415159614,
"acc_norm": 0.6339869281045751,
"acc_norm_stderr": 0.027582811415159614
},
"harness|hendrycksTest-philosophy|5": {
"acc": 0.6430868167202572,
"acc_stderr": 0.027210420375934023,
"acc_norm": 0.6430868167202572,
"acc_norm_stderr": 0.027210420375934023
},
"harness|hendrycksTest-prehistory|5": {
"acc": 0.6697530864197531,
"acc_stderr": 0.026168298456732846,
"acc_norm": 0.6697530864197531,
"acc_norm_stderr": 0.026168298456732846
},
"harness|hendrycksTest-professional_accounting|5": {
"acc": 0.41843971631205673,
"acc_stderr": 0.02942799403941999,
"acc_norm": 0.41843971631205673,
"acc_norm_stderr": 0.02942799403941999
},
"harness|hendrycksTest-professional_law|5": {
"acc": 0.4198174706649283,
"acc_stderr": 0.012604960816087373,
"acc_norm": 0.4198174706649283,
"acc_norm_stderr": 0.012604960816087373
},
"harness|hendrycksTest-professional_medicine|5": {
"acc": 0.5404411764705882,
"acc_stderr": 0.03027332507734575,
"acc_norm": 0.5404411764705882,
"acc_norm_stderr": 0.03027332507734575
},
"harness|hendrycksTest-professional_psychology|5": {
"acc": 0.5800653594771242,
"acc_stderr": 0.019966811178256483,
"acc_norm": 0.5800653594771242,
"acc_norm_stderr": 0.019966811178256483
},
"harness|hendrycksTest-public_relations|5": {
"acc": 0.6181818181818182,
"acc_stderr": 0.046534298079135075,
"acc_norm": 0.6181818181818182,
"acc_norm_stderr": 0.046534298079135075
},
"harness|hendrycksTest-security_studies|5": {
"acc": 0.636734693877551,
"acc_stderr": 0.030789051139030806,
"acc_norm": 0.636734693877551,
"acc_norm_stderr": 0.030789051139030806
},
"harness|hendrycksTest-sociology|5": {
"acc": 0.7711442786069652,
"acc_stderr": 0.029705284056772436,
"acc_norm": 0.7711442786069652,
"acc_norm_stderr": 0.029705284056772436
},
"harness|hendrycksTest-us_foreign_policy|5": {
"acc": 0.85,
"acc_stderr": 0.0358870281282637,
"acc_norm": 0.85,
"acc_norm_stderr": 0.0358870281282637
},
"harness|hendrycksTest-virology|5": {
"acc": 0.4939759036144578,
"acc_stderr": 0.03892212195333045,
"acc_norm": 0.4939759036144578,
"acc_norm_stderr": 0.03892212195333045
},
"harness|hendrycksTest-world_religions|5": {
"acc": 0.783625730994152,
"acc_stderr": 0.03158149539338734,
"acc_norm": 0.783625730994152,
"acc_norm_stderr": 0.03158149539338734
},
"harness|truthfulqa:mc|0": {
"mc1": 0.33414932680538556,
"mc1_stderr": 0.016512530677150538,
"mc2": 0.48702658726620335,
"mc2_stderr": 0.014839126920436898
},
"harness|winogrande|5": {
"acc": 0.7695343330702447,
"acc_stderr": 0.01183587216483667
},
"harness|gsm8k|5": {
"acc": 0.34040940106141016,
"acc_stderr": 0.013052097103299104
}
}
```
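Because every per-task entry is keyed as `harness|<task>|<n_shots>`, sub-benchmark averages fall out of this JSON directly; a minimal sketch, assuming `results` holds the dict above (parsed with `json.loads` if read from the results file):
```python
# Average accuracy over the 57 MMLU (hendrycksTest) subtasks in the dict above.
mmlu = [v["acc"] for k, v in results.items()
        if k.startswith("harness|hendrycksTest-")]
print(f"{len(mmlu)} MMLU subtasks, mean acc = {sum(mmlu) / len(mmlu):.4f}")
```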
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] | open-llm-leaderboard/details_ValiantLabs__ShiningValiantXS | [
"region:us"
]
| 2023-11-15T00:41:28+00:00 | {"pretty_name": "Evaluation run of ValiantLabs/ShiningValiantXS", "dataset_summary": "Dataset automatically created during the evaluation run of model [ValiantLabs/ShiningValiantXS](https://huggingface.co/ValiantLabs/ShiningValiantXS) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_ValiantLabs__ShiningValiantXS\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2024-01-13T18:20:38.822365](https://huggingface.co/datasets/open-llm-leaderboard/details_ValiantLabs__ShiningValiantXS/blob/main/results_2024-01-13T18-20-38.822365.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.5675568851858357,\n \"acc_stderr\": 0.033390996224643595,\n \"acc_norm\": 0.5721774824296487,\n \"acc_norm_stderr\": 0.034080917555585837,\n \"mc1\": 0.33414932680538556,\n \"mc1_stderr\": 0.016512530677150538,\n \"mc2\": 0.48702658726620335,\n \"mc2_stderr\": 0.014839126920436898\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.5477815699658704,\n \"acc_stderr\": 0.01454451988063383,\n \"acc_norm\": 0.5895904436860068,\n \"acc_norm_stderr\": 0.014374922192642664\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6136227843059151,\n \"acc_stderr\": 0.004859236191579797,\n \"acc_norm\": 0.819259111730731,\n \"acc_norm_stderr\": 0.003840169224012275\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.33,\n \"acc_stderr\": 0.04725815626252606,\n \"acc_norm\": 0.33,\n \"acc_norm_stderr\": 0.04725815626252606\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.4962962962962963,\n \"acc_stderr\": 0.04319223625811331,\n \"acc_norm\": 0.4962962962962963,\n \"acc_norm_stderr\": 0.04319223625811331\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.5657894736842105,\n \"acc_stderr\": 0.04033565667848319,\n \"acc_norm\": 0.5657894736842105,\n \"acc_norm_stderr\": 0.04033565667848319\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.55,\n \"acc_stderr\": 0.049999999999999996,\n \"acc_norm\": 0.55,\n \"acc_norm_stderr\": 0.049999999999999996\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.630188679245283,\n \"acc_stderr\": 0.029711421880107936,\n \"acc_norm\": 0.630188679245283,\n \"acc_norm_stderr\": 0.029711421880107936\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.625,\n \"acc_stderr\": 0.04048439222695598,\n \"acc_norm\": 0.625,\n \"acc_norm_stderr\": 0.04048439222695598\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.4,\n \"acc_stderr\": 0.04923659639173309,\n 
\"acc_norm\": 0.4,\n \"acc_norm_stderr\": 0.04923659639173309\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.46,\n \"acc_stderr\": 0.05009082659620332,\n \"acc_norm\": 0.46,\n \"acc_norm_stderr\": 0.05009082659620332\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.31,\n \"acc_stderr\": 0.04648231987117316,\n \"acc_norm\": 0.31,\n \"acc_norm_stderr\": 0.04648231987117316\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.5260115606936416,\n \"acc_stderr\": 0.038073017265045125,\n \"acc_norm\": 0.5260115606936416,\n \"acc_norm_stderr\": 0.038073017265045125\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.2549019607843137,\n \"acc_stderr\": 0.04336432707993179,\n \"acc_norm\": 0.2549019607843137,\n \"acc_norm_stderr\": 0.04336432707993179\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.7,\n \"acc_stderr\": 0.046056618647183814,\n \"acc_norm\": 0.7,\n \"acc_norm_stderr\": 0.046056618647183814\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.42127659574468085,\n \"acc_stderr\": 0.03227834510146268,\n \"acc_norm\": 0.42127659574468085,\n \"acc_norm_stderr\": 0.03227834510146268\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.3157894736842105,\n \"acc_stderr\": 0.04372748290278007,\n \"acc_norm\": 0.3157894736842105,\n \"acc_norm_stderr\": 0.04372748290278007\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.5448275862068965,\n \"acc_stderr\": 0.04149886942192118,\n \"acc_norm\": 0.5448275862068965,\n \"acc_norm_stderr\": 0.04149886942192118\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.34656084656084657,\n \"acc_stderr\": 0.024508777521028428,\n \"acc_norm\": 0.34656084656084657,\n \"acc_norm_stderr\": 0.024508777521028428\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.3253968253968254,\n \"acc_stderr\": 0.041905964388711366,\n \"acc_norm\": 0.3253968253968254,\n \"acc_norm_stderr\": 0.041905964388711366\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.34,\n \"acc_stderr\": 0.04760952285695236,\n \"acc_norm\": 0.34,\n \"acc_norm_stderr\": 0.04760952285695236\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.6451612903225806,\n \"acc_stderr\": 0.02721888977330877,\n \"acc_norm\": 0.6451612903225806,\n \"acc_norm_stderr\": 0.02721888977330877\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.45320197044334976,\n \"acc_stderr\": 0.03502544650845872,\n \"acc_norm\": 0.45320197044334976,\n \"acc_norm_stderr\": 0.03502544650845872\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.56,\n \"acc_stderr\": 0.04988876515698589,\n \"acc_norm\": 0.56,\n \"acc_norm_stderr\": 0.04988876515698589\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.6787878787878788,\n \"acc_stderr\": 0.036462049632538115,\n \"acc_norm\": 0.6787878787878788,\n \"acc_norm_stderr\": 0.036462049632538115\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.7474747474747475,\n \"acc_stderr\": 0.030954055470365914,\n \"acc_norm\": 0.7474747474747475,\n \"acc_norm_stderr\": 0.030954055470365914\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.8290155440414507,\n \"acc_stderr\": 0.027171213683164542,\n \"acc_norm\": 0.8290155440414507,\n \"acc_norm_stderr\": 0.027171213683164542\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 
0.5435897435897435,\n \"acc_stderr\": 0.025254485424799605,\n \"acc_norm\": 0.5435897435897435,\n \"acc_norm_stderr\": 0.025254485424799605\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.3074074074074074,\n \"acc_stderr\": 0.028133252578815635,\n \"acc_norm\": 0.3074074074074074,\n \"acc_norm_stderr\": 0.028133252578815635\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.5336134453781513,\n \"acc_stderr\": 0.03240501447690071,\n \"acc_norm\": 0.5336134453781513,\n \"acc_norm_stderr\": 0.03240501447690071\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.2781456953642384,\n \"acc_stderr\": 0.03658603262763743,\n \"acc_norm\": 0.2781456953642384,\n \"acc_norm_stderr\": 0.03658603262763743\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.7596330275229358,\n \"acc_stderr\": 0.01832060732096407,\n \"acc_norm\": 0.7596330275229358,\n \"acc_norm_stderr\": 0.01832060732096407\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.4583333333333333,\n \"acc_stderr\": 0.03398110890294636,\n \"acc_norm\": 0.4583333333333333,\n \"acc_norm_stderr\": 0.03398110890294636\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.7941176470588235,\n \"acc_stderr\": 0.028379449451588663,\n \"acc_norm\": 0.7941176470588235,\n \"acc_norm_stderr\": 0.028379449451588663\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.7763713080168776,\n \"acc_stderr\": 0.027123298205229966,\n \"acc_norm\": 0.7763713080168776,\n \"acc_norm_stderr\": 0.027123298205229966\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.7085201793721974,\n \"acc_stderr\": 0.03050028317654585,\n \"acc_norm\": 0.7085201793721974,\n \"acc_norm_stderr\": 0.03050028317654585\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.6641221374045801,\n \"acc_stderr\": 0.041423137719966634,\n \"acc_norm\": 0.6641221374045801,\n \"acc_norm_stderr\": 0.041423137719966634\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.743801652892562,\n \"acc_stderr\": 0.03984979653302872,\n \"acc_norm\": 0.743801652892562,\n \"acc_norm_stderr\": 0.03984979653302872\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.7592592592592593,\n \"acc_stderr\": 0.04133119440243839,\n \"acc_norm\": 0.7592592592592593,\n \"acc_norm_stderr\": 0.04133119440243839\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.7116564417177914,\n \"acc_stderr\": 0.035590395316173425,\n \"acc_norm\": 0.7116564417177914,\n \"acc_norm_stderr\": 0.035590395316173425\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.36607142857142855,\n \"acc_stderr\": 0.045723723587374296,\n \"acc_norm\": 0.36607142857142855,\n \"acc_norm_stderr\": 0.045723723587374296\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.6699029126213593,\n \"acc_stderr\": 0.0465614711001235,\n \"acc_norm\": 0.6699029126213593,\n \"acc_norm_stderr\": 0.0465614711001235\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.7991452991452992,\n \"acc_stderr\": 0.02624677294689048,\n \"acc_norm\": 0.7991452991452992,\n \"acc_norm_stderr\": 0.02624677294689048\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.58,\n \"acc_stderr\": 0.04960449637488583,\n \"acc_norm\": 0.58,\n \"acc_norm_stderr\": 0.04960449637488583\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.7803320561941252,\n \"acc_stderr\": 0.014805384478371155,\n \"acc_norm\": 
0.7803320561941252,\n \"acc_norm_stderr\": 0.014805384478371155\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.653179190751445,\n \"acc_stderr\": 0.025624723994030457,\n \"acc_norm\": 0.653179190751445,\n \"acc_norm_stderr\": 0.025624723994030457\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.3474860335195531,\n \"acc_stderr\": 0.01592556406020815,\n \"acc_norm\": 0.3474860335195531,\n \"acc_norm_stderr\": 0.01592556406020815\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.6339869281045751,\n \"acc_stderr\": 0.027582811415159614,\n \"acc_norm\": 0.6339869281045751,\n \"acc_norm_stderr\": 0.027582811415159614\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.6430868167202572,\n \"acc_stderr\": 0.027210420375934023,\n \"acc_norm\": 0.6430868167202572,\n \"acc_norm_stderr\": 0.027210420375934023\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.6697530864197531,\n \"acc_stderr\": 0.026168298456732846,\n \"acc_norm\": 0.6697530864197531,\n \"acc_norm_stderr\": 0.026168298456732846\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.41843971631205673,\n \"acc_stderr\": 0.02942799403941999,\n \"acc_norm\": 0.41843971631205673,\n \"acc_norm_stderr\": 0.02942799403941999\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.4198174706649283,\n \"acc_stderr\": 0.012604960816087373,\n \"acc_norm\": 0.4198174706649283,\n \"acc_norm_stderr\": 0.012604960816087373\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.5404411764705882,\n \"acc_stderr\": 0.03027332507734575,\n \"acc_norm\": 0.5404411764705882,\n \"acc_norm_stderr\": 0.03027332507734575\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.5800653594771242,\n \"acc_stderr\": 0.019966811178256483,\n \"acc_norm\": 0.5800653594771242,\n \"acc_norm_stderr\": 0.019966811178256483\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.6181818181818182,\n \"acc_stderr\": 0.046534298079135075,\n \"acc_norm\": 0.6181818181818182,\n \"acc_norm_stderr\": 0.046534298079135075\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.636734693877551,\n \"acc_stderr\": 0.030789051139030806,\n \"acc_norm\": 0.636734693877551,\n \"acc_norm_stderr\": 0.030789051139030806\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.7711442786069652,\n \"acc_stderr\": 0.029705284056772436,\n \"acc_norm\": 0.7711442786069652,\n \"acc_norm_stderr\": 0.029705284056772436\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.85,\n \"acc_stderr\": 0.0358870281282637,\n \"acc_norm\": 0.85,\n \"acc_norm_stderr\": 0.0358870281282637\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.4939759036144578,\n \"acc_stderr\": 0.03892212195333045,\n \"acc_norm\": 0.4939759036144578,\n \"acc_norm_stderr\": 0.03892212195333045\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.783625730994152,\n \"acc_stderr\": 0.03158149539338734,\n \"acc_norm\": 0.783625730994152,\n \"acc_norm_stderr\": 0.03158149539338734\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.33414932680538556,\n \"mc1_stderr\": 0.016512530677150538,\n \"mc2\": 0.48702658726620335,\n \"mc2_stderr\": 0.014839126920436898\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7695343330702447,\n \"acc_stderr\": 0.01183587216483667\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.34040940106141016,\n \"acc_stderr\": 0.013052097103299104\n }\n}\n```", "repo_url": 
"https://huggingface.co/ValiantLabs/ShiningValiantXS", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|arc:challenge|25_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|arc:challenge|25_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|arc:challenge|25_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_drop_3", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|drop|3_2023-11-15T00-38-24.676368.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-11-15T00-38-24.676368.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|gsm8k|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|gsm8k|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|gsm8k|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hellaswag|10_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hellaswag|10_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hellaswag|10_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-15T00-38-24.676368.parquet", 
"**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-management|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-11-15T00-38-24.676368.parquet", 
"**/details_harness|hendrycksTest-sociology|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-virology|5_2023-11-15T00-38-24.676368.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-12-04T19-27-31.992415.parquet", 
"**/details_harness|hendrycksTest-human_sexuality|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-management|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-virology|5_2023-12-04T19-27-31.992415.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-anatomy|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-astronomy|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-college_biology|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-college_physics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-computer_security|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-econometrics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2024-01-13T18-20-38.822365.parquet", 
"**/details_harness|hendrycksTest-elementary_mathematics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-global_facts|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-human_aging|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-international_law|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-management|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-marketing|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-nutrition|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-philosophy|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-prehistory|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-professional_law|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-public_relations|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-security_studies|5_2024-01-13T18-20-38.822365.parquet", 
"**/details_harness|hendrycksTest-sociology|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-virology|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-world_religions|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-anatomy|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-astronomy|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-college_biology|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-college_physics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-computer_security|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-econometrics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-global_facts|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-human_aging|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2024-01-13T18-20-38.822365.parquet", 
"**/details_harness|hendrycksTest-international_law|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-management|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-marketing|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-nutrition|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-philosophy|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-prehistory|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-professional_law|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-public_relations|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-security_studies|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-sociology|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-virology|5_2024-01-13T18-20-38.822365.parquet", "**/details_harness|hendrycksTest-world_religions|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-anatomy|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": 
["**/details_harness|hendrycksTest-astronomy|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-astronomy|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-college_biology|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", 
"data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-college_physics|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-computer_security|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-econometrics|5_2024-01-13T18-20-38.822365.parquet"]}, 
{"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-global_facts|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": 
["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": 
["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": 
"harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-human_aging|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-international_law|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": 
["**/details_harness|hendrycksTest-jurisprudence|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-management|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-management|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-management|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-marketing|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": 
["**/details_harness|hendrycksTest-miscellaneous|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-nutrition|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-philosophy|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-prehistory|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": 
["**/details_harness|hendrycksTest-professional_accounting|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-professional_law|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-public_relations|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": 
["**/details_harness|hendrycksTest-security_studies|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-sociology|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-virology|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-virology|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-virology|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|hendrycksTest-world_religions|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|truthfulqa:mc|0_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|truthfulqa:mc|0_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["**/details_harness|truthfulqa:mc|0_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["**/details_harness|winogrande|5_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["**/details_harness|winogrande|5_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": 
["**/details_harness|winogrande|5_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2024-01-13T18-20-38.822365.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_11_15T00_38_24.676368", "path": ["results_2023-11-15T00-38-24.676368.parquet"]}, {"split": "2023_12_04T19_27_31.992415", "path": ["results_2023-12-04T19-27-31.992415.parquet"]}, {"split": "2024_01_13T18_20_38.822365", "path": ["results_2024-01-13T18-20-38.822365.parquet"]}, {"split": "latest", "path": ["results_2024-01-13T18-20-38.822365.parquet"]}]}]} | 2024-01-13T18:23:20+00:00 | []
| []
| TAGS
#region-us
|
# Dataset Card for Evaluation run of ValiantLabs/ShiningValiantXS
Dataset automatically created during the evaluation run of model ValiantLabs/ShiningValiantXS on the Open LLM Leaderboard.
The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks.
The dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.
An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).
To load the details from a run, you can for instance do the following:
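For example, a minimal sketch (the repository id below is inferred from the usual Open LLM Leaderboard "details_<org>__<model>" naming pattern and is an assumption, since this card does not spell it out):

```python
from datasets import load_dataset

# Repo id assumed from the standard "details_<org>__<model>" naming scheme.
data = load_dataset(
    "open-llm-leaderboard/details_ValiantLabs__ShiningValiantXS",
    "harness_winogrande_5",  # one of the 64 per-task configurations
    split="train",           # "train" always points to the latest results
)
```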
## Latest results
These are the latest results from run 2024-01-13T18:20:38.822365 (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each one in the results and in the "latest" split for each eval):
## Dataset Details
### Dataset Description
- Curated by:
- Funded by [optional]:
- Shared by [optional]:
- Language(s) (NLP):
- License:
### Dataset Sources [optional]
- Repository:
- Paper [optional]:
- Demo [optional]:
## Uses
### Direct Use
### Out-of-Scope Use
## Dataset Structure
## Dataset Creation
### Curation Rationale
### Source Data
#### Data Collection and Processing
#### Who are the source data producers?
### Annotations [optional]
#### Annotation process
#### Who are the annotators?
#### Personal and Sensitive Information
## Bias, Risks, and Limitations
### Recommendations
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
[optional]
BibTeX:
APA:
## Glossary [optional]
## More Information [optional]
## Dataset Card Authors [optional]
## Dataset Card Contact
| [
"# Dataset Card for Evaluation run of ValiantLabs/ShiningValiantXS\n\n\n\nDataset automatically created during the evaluation run of model ValiantLabs/ShiningValiantXS on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:",
"## Latest results\n\nThese are the latest results from run 2024-01-13T18:20:38.822365(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):",
"## Dataset Details",
"### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:",
"### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:",
"## Uses",
"### Direct Use",
"### Out-of-Scope Use",
"## Dataset Structure",
"## Dataset Creation",
"### Curation Rationale",
"### Source Data",
"#### Data Collection and Processing",
"#### Who are the source data producers?",
"### Annotations [optional]",
"#### Annotation process",
"#### Who are the annotators?",
"#### Personal and Sensitive Information",
"## Bias, Risks, and Limitations",
"### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:",
"## Glossary [optional]",
"## More Information [optional]",
"## Dataset Card Authors [optional]",
"## Dataset Card Contact"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for Evaluation run of ValiantLabs/ShiningValiantXS\n\n\n\nDataset automatically created during the evaluation run of model ValiantLabs/ShiningValiantXS on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:",
"## Latest results\n\nThese are the latest results from run 2024-01-13T18:20:38.822365(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):",
"## Dataset Details",
"### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:",
"### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:",
"## Uses",
"### Direct Use",
"### Out-of-Scope Use",
"## Dataset Structure",
"## Dataset Creation",
"### Curation Rationale",
"### Source Data",
"#### Data Collection and Processing",
"#### Who are the source data producers?",
"### Annotations [optional]",
"#### Annotation process",
"#### Who are the annotators?",
"#### Personal and Sensitive Information",
"## Bias, Risks, and Limitations",
"### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:",
"## Glossary [optional]",
"## More Information [optional]",
"## Dataset Card Authors [optional]",
"## Dataset Card Contact"
]
| [
6,
183,
68,
4,
40,
29,
3,
4,
9,
6,
5,
7,
4,
7,
10,
9,
5,
9,
8,
10,
46,
8,
7,
10,
5
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of ValiantLabs/ShiningValiantXS\n\n\n\nDataset automatically created during the evaluation run of model ValiantLabs/ShiningValiantXS on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 3 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2024-01-13T18:20:38.822365(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact"
]
|
ccdc86fd232f10eaaf6c8ac519e59451ca435354 | # Dataset Card for "pascal-voc-2012"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | jxie/pascal-voc-2012 | [
"region:us"
]
| 2023-11-15T00:45:48+00:00 | {"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "mask", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 1115392514.736, "num_examples": 10582}, {"name": "val", "num_bytes": 164042133.619, "num_examples": 1449}], "download_size": 1405650165, "dataset_size": 1279434648.355}} | 2023-11-16T06:41:31+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "pascal-voc-2012"
More Information needed | [
"# Dataset Card for \"pascal-voc-2012\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"pascal-voc-2012\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"pascal-voc-2012\"\n\nMore Information needed"
]
|
4783de6e089f8ef1dccb26e9f46d491ec0273c83 | # Dataset Card for "consumer_complaints_medium"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | determined-ai/consumer_complaints_medium | [
"region:us"
]
| 2023-11-15T00:46:28+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "Issue", "dtype": "string"}, {"name": "Consumer Complaint", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 19961957, "num_examples": 64292}, {"name": "test", "num_bytes": 6654389, "num_examples": 21439}], "download_size": 13546206, "dataset_size": 26616346}} | 2023-11-15T00:46:34+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "consumer_complaints_medium"
More Information needed | [
"# Dataset Card for \"consumer_complaints_medium\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"consumer_complaints_medium\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"consumer_complaints_medium\"\n\nMore Information needed"
]
|
399ec1c583e2c685a4baebaa4441ea303df58c4b | # Dataset Card for "validation_2000_cutoff_clean_llama_300_results"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | Rewcifer/validation_2000_cutoff_clean_llama_300_results | [
"region:us"
]
| 2023-11-15T01:03:32+00:00 | {"dataset_info": {"features": [{"name": "labels_and_findings", "dtype": "string"}, {"name": "prompts", "dtype": "string"}, {"name": "true_findings", "dtype": "string"}, {"name": "generated_texts", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2681029, "num_examples": 300}], "download_size": 681352, "dataset_size": 2681029}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T01:03:33+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "validation_2000_cutoff_clean_llama_300_results"
More Information needed | [
"# Dataset Card for \"validation_2000_cutoff_clean_llama_300_results\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"validation_2000_cutoff_clean_llama_300_results\"\n\nMore Information needed"
]
| [
6,
30
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"validation_2000_cutoff_clean_llama_300_results\"\n\nMore Information needed"
]
|
1e36a0018270f6e1c242168feec57fb35f378f4c | # Dataset Card for "snli-NOB"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | tollefj/snli-NOB | [
"license:cc-by-4.0",
"region:us"
]
| 2023-11-15T01:28:35+00:00 | {"license": "cc-by-4.0", "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "premise", "dtype": "string"}, {"name": "hypothesis", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 63889357, "num_examples": 550152}, {"name": "validation", "num_bytes": 1225673, "num_examples": 10000}, {"name": "test", "num_bytes": 1218676, "num_examples": 10000}], "download_size": 19990084, "dataset_size": 66333706}} | 2024-01-06T12:27:40+00:00 | []
| []
| TAGS
#license-cc-by-4.0 #region-us
| # Dataset Card for "snli-NOB"
More Information needed | [
"# Dataset Card for \"snli-NOB\"\n\nMore Information needed"
]
| [
"TAGS\n#license-cc-by-4.0 #region-us \n",
"# Dataset Card for \"snli-NOB\"\n\nMore Information needed"
]
| [
15,
16
]
| [
"passage: TAGS\n#license-cc-by-4.0 #region-us \n# Dataset Card for \"snli-NOB\"\n\nMore Information needed"
]
|
56b9eec0c4054c57ff2406e11a15b3271424df03 | # Dataset Card for "best_outputs_3models"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | Rewcifer/best_outputs_3models | [
"region:us"
]
| 2023-11-15T01:42:11+00:00 | {"dataset_info": {"features": [{"name": "true_findings", "dtype": "string"}, {"name": "generated_texts_1", "dtype": "string"}, {"name": "generated_texts_2", "dtype": "string"}, {"name": "generated_texts_3", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1861317, "num_examples": 861}], "download_size": 799511, "dataset_size": 1861317}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T01:43:40+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "best_outputs_3models"
More Information needed | [
"# Dataset Card for \"best_outputs_3models\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"best_outputs_3models\"\n\nMore Information needed"
]
| [
6,
19
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"best_outputs_3models\"\n\nMore Information needed"
]
|
79e2f14d9df6fb10307e4f50bc75ad1e2ad7ec6d | # Dataset Card for "best_outputs_selected_50_3model"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | Rewcifer/best_outputs_selected_50_3model | [
"region:us"
]
| 2023-11-15T01:44:44+00:00 | {"dataset_info": {"features": [{"name": "true_findings", "dtype": "string"}, {"name": "generated_texts_1", "dtype": "string"}, {"name": "generated_texts_2", "dtype": "string"}, {"name": "generated_texts_3", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 108090.4181184669, "num_examples": 50}], "download_size": 83094, "dataset_size": 108090.4181184669}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T01:44:45+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "best_outputs_selected_50_3model"
More Information needed | [
"# Dataset Card for \"best_outputs_selected_50_3model\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"best_outputs_selected_50_3model\"\n\nMore Information needed"
]
| [
6,
23
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"best_outputs_selected_50_3model\"\n\nMore Information needed"
]
|
307444455adb47b4bbc8c4e9fe6d610e277b46a6 | # Dataset Card for "outputs_3models_300"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | Rewcifer/outputs_3models_300 | [
"region:us"
]
| 2023-11-15T02:29:52+00:00 | {"dataset_info": {"features": [{"name": "labels", "dtype": "string"}, {"name": "true_findings", "dtype": "string"}, {"name": "generated_texts_1", "dtype": "string"}, {"name": "row_number", "dtype": "int64"}, {"name": "generated_texts_2", "dtype": "string"}, {"name": "generated_texts_3", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2020513, "num_examples": 300}], "download_size": 586799, "dataset_size": 2020513}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T02:29:54+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "outputs_3models_300"
More Information needed | [
"# Dataset Card for \"outputs_3models_300\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"outputs_3models_300\"\n\nMore Information needed"
]
| [
6,
19
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"outputs_3models_300\"\n\nMore Information needed"
]
|
2e42b84947acd5ee7d9232c975f0839ddb1d939c |
# MusicBench Dataset
The MusicBench dataset is a music audio-text pair dataset designed for text-to-music generation and released along with the Mustango text-to-music model. MusicBench is based on the MusicCaps dataset, which it expands from 5,521 samples to 52,768 training and 400 test samples!
## Dataset Details
MusicBench expands MusicCaps by:
1. Including music features of chords, beats, tempo, and key that are extracted from the audio.
2. Describing these music features using text templates and thus enhancing the original text prompts.
3. Expanding the number of audio samples by performing musically meaningful augmentations: semitone pitch shifts, tempo changes, and volume changes.
Train set size = 52,768 samples
Test set size = 400
### Dataset Description
MusicBench consists of 3 .json files and attached audio files in .tar.gz form.
The train set contains audio augmented samples and enhanced captions. Additionally, it offers ChatGPT rephrased captions for all the audio samples.
Both TestA and TestB sets contain the same audio content, but TestB has all 4 possible control sentences (related to 4 music features) in captions of all samples, while TestA has no control sentences in the captions.
For more details, see Figure 1 in our paper.
Each row of a .json file has the following fields (a minimal loading sketch follows the list):
1. **location** (of the files after decompressing the .tar.gz file)
2. **main_caption** – text prompts that are a result of augmentation (TestB contains control sentences, train set contains ChatGPT rephrased captions here)
3. **alt_caption** – in the case of TestB these are captions without any control sentences added.
4. prompt_aug – A control sentence related to volume change augmentation.
5. prompt_ch – A control sentence describing the chord sequence.
6. prompt_bt – A control sentence describing the beat count (meter)
7. prompt_bpm – A control sentence describing tempo, either in beats per minute (bpm), or in musical words, e.g., Adagio, Moderato, Presto.
8. prompt_key – A control sentence related to the extracted musical key.
9. **beats** – The beat and downbeat timestamps. This is used as an input for training Mustango.
10. bpm – The tempo feature saved as a number.
11. **chords** – The chord sequence contained in the track. This is used as an input for training Mustango.
12. **chords_time** – Timestamps of the detected chords. This is used as an input for training Mustango.
13. key – The root and the type of the detected key.
14. keyprob – The confidence score for this detected key provided by the detection algorithm.
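A minimal sketch of reading one of these files, assuming a JSON Lines layout and a hypothetical file name (the card specifies neither exactly):

```python
import json
from pathlib import Path

# Both the file name and the one-JSON-object-per-line layout are assumptions;
# if the file is a single JSON array, use json.load(f) instead.
path = Path("MusicBench_train.json")
with path.open() as f:
    rows = [json.loads(line) for line in f if line.strip()]

row = rows[0]
print(row["location"])                    # audio path after decompressing the .tar.gz
print(row["main_caption"])                # enhanced / ChatGPT-rephrased prompt
print(row["bpm"], row["key"])             # tempo and detected key
print(row["chords"], row["chords_time"])  # chord sequence with timestamps
print(row["beats"])                       # beat and downbeat timestamps
```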
# FMACaps Evaluation Dataset
We also present the FMACaps evaluation dataset, which consists of 1,000 samples extracted from the Free Music Archive (FMA) and pseudocaptioned by extracting tags from the audio and then applying ChatGPT in-context learning. More information is available in our paper!

Most of the samples are 10 seconds long; the exceptions are between 5 and 10 seconds long.
Data size: 1,000 samples
Sampling rate: 16 kHz
Files included:
1. 1,000 audio files in the "audiodata" folder
2. FMACaps_A – this file contains captions with NO control sentences.
3. FMACaps_B – this file contains captions with ALL control sentences. We used this file for our controllability evaluation of Mustango.
4. FMACaps_C – this file contains captions with SOME control sentences. For each sample, we chose 0/1/2/3/4 control sentences with probabilities of 25/30/20/15/10 %, as described in our paper. This file was used to objectively evaluate the audio quality of Mustango.
The structure of each .json file is identical to MusicBench, as described in the previous section, with the exception of the "alt_caption" column, which is empty. **All captions** are in the **"main_caption" column**!
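As a rough illustration, the controllability captions could be paired with their audio files like this (the ".json" extension and JSON Lines layout are assumptions; the "audiodata" folder and the "main_caption"/"location" fields come from the descriptions above):

```python
import json
from pathlib import Path

audio_dir = Path("audiodata")        # folder named in the file list above
with open("FMACaps_B.json") as f:    # extension and per-line layout assumed
    rows = [json.loads(line) for line in f if line.strip()]

# Every caption lives in "main_caption"; "location" points at the audio file.
pairs = [(audio_dir / Path(r["location"]).name, r["main_caption"]) for r in rows]
print(len(pairs), pairs[0])
```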
## Links
- **Code Repository:** [https://github.com/AMAAI-Lab/mustango]
- **Paper:** [https://arxiv.org/abs/2311.08355]
- **Demo:** [https://replicate.com/declare-lab/mustango]
- **Website:** [https://amaai-lab.github.io/mustango/]
## Citation
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
```bibtex
@misc{melechovsky2023mustango,
title={Mustango: Toward Controllable Text-to-Music Generation},
author={Jan Melechovsky and Zixun Guo and Deepanway Ghosal and Navonil Majumder and Dorien Herremans and Soujanya Poria},
year={2023},
eprint={2311.08355},
archivePrefix={arXiv}
}
```
**License:** cc-by-sa-3.0 | amaai-lab/MusicBench | [
"license:cc-by-sa-3.0",
"arxiv:2311.08355",
"region:us"
]
| 2023-11-15T03:07:56+00:00 | {"license": "cc-by-sa-3.0"} | 2023-11-20T14:54:56+00:00 | [
"2311.08355"
]
| []
| TAGS
#license-cc-by-sa-3.0 #arxiv-2311.08355 #region-us
|
# MusicBench Dataset
The MusicBench dataset is a music audio-text pair dataset that was designed for text-to-music generation purpose and released along with Mustango text-to-music model. MusicBench is based on the MusicCaps dataset, which it expands from 5,521 samples to 52,768 training and 400 test samples!
## Dataset Details
MusicBench expands MusicCaps by:
1. Including music features of chords, beats, tempo, and key that are extracted from the audio.
2. Describing these music features using text templates and thus enhancing the original text prompts.
3. Expanding the number of audio samples by performing musically meaningful augmentations: semitone pitch shifts, tempo changes, and volume changes.
Train set size = 52,768 samples
Test set size = 400
### Dataset Description
MusicBench consists of 3 .json files and attached audio files in .URL form.
The train set contains audio augmented samples and enhanced captions. Additionally, it offers ChatGPT rephrased captions for all the audio samples.
Both TestA and TestB sets contain the same audio content, but TestB has all 4 possible control sentences (related to 4 music features) in captions of all samples, while TestA has no control sentences in the captions.
For more details, see Figure 1 in our paper.
Each row of a .json file has:
1. location (of the files after decompressing the .URL file)
2. main_caption – text prompts that are a result of augmentation (TestB contains control sentences, train set contains ChatGPT rephrased captions here)
3. alt_caption – in the case of TestB these are captions without any control sentences added.
4. prompt_aug – A control sentence related to volume change augmentation.
5. prompt_ch – A control sentence describing the chord sequence.
6. prompt_bt – A control sentence describing the beat count (meter)
7. prompt_bpm – A control sentence describing tempo, either in beats per minute (bpm), or in musical words, e.g., Adagio, Moderato, Presto.
8. prompt_key – A control sentence related to the extracted musical key.
9. beats – The beat and downbeat timestamps. This is used as an input for training Mustango.
10. bpm – The tempo feature saved as a number.
11. chords – The chord sequence contained in the track. This is used as an input for training Mustango.
12. chords_time – Timestamps of the detected chords. This is used as an input for training Mustango.
13. key – The root and the type of the detected key.
14. keyprob – The confidence score for this detected key provided by the detection algorithm.
# FMACaps Evaluation Dataset
Hereby, we also present you the FMACaps evaluation dataset which consists of 1000 samples extracted from the Free Music Archive (FMA) and pseudocaptioned through extracting tags from audio and then utilizing ChatGPT in-context learning. More information is available in our paper!
Most of the samples are 10 second long, exceptions are between 5 to 10 seconds long.
Data size: 1,000 samples
Sampling rate: 16 kHz
Files included:
1. 1,000 audio files in the "audiodata" folder
2. FMACaps_A – this file contains captions with NO control sentences.
3. FMACaps_B – this file contains captions with ALL control sentences. We used this file the our controllability evaluation of Mustango.
4. FMACaps_C – this file contains captions with SOME controls sentences. For each sample, we chose 0/1/2/3/4 control sentences with a probability of 25/30/20/15/10 %, as described in our paper. This file was used to objectively evaluate audio quality of Mustango.
The structure of each .json file is identical to MusicBench, as described in the previous section, with the exception of "alt_caption" column being empty. All captions are in the "main_caption" column!
## Links
- Code Repository: [URL
- Paper: [URL
- Demo: [URL
- Website: [URL
License: cc-by-sa-3.0 | [
"# MusicBench Dataset\n\nThe MusicBench dataset is a music audio-text pair dataset that was designed for text-to-music generation purpose and released along with Mustango text-to-music model. MusicBench is based on the MusicCaps dataset, which it expands from 5,521 samples to 52,768 training and 400 test samples!",
"## Dataset Details\nMusicBench expands MusicCaps by:\n1. Including music features of chords, beats, tempo, and key that are extracted from the audio.\n2. Describing these music features using text templates and thus enhancing the original text prompts.\n3. Expanding the number of audio samples by performing musically meaningful augmentations: semitone pitch shifts, tempo changes, and volume changes.\n\nTrain set size = 52,768 samples\nTest set size = 400",
"### Dataset Description\nMusicBench consists of 3 .json files and attached audio files in .URL form.\n\nThe train set contains audio augmented samples and enhanced captions. Additionally, it offers ChatGPT rephrased captions for all the audio samples.\nBoth TestA and TestB sets contain the same audio content, but TestB has all 4 possible control sentences (related to 4 music features) in captions of all samples, while TestA has no control sentences in the captions.\n\nFor more details, see Figure 1 in our paper.\n\n\nEach row of a .json file has:\n1. location (of the files after decompressing the .URL file)\n2. main_caption – text prompts that are a result of augmentation (TestB contains control sentences, train set contains ChatGPT rephrased captions here)\n3. alt_caption – in the case of TestB these are captions without any control sentences added.\n4. prompt_aug – A control sentence related to volume change augmentation.\n5. prompt_ch – A control sentence describing the chord sequence.\n6. prompt_bt – A control sentence describing the beat count (meter)\n7. prompt_bpm – A control sentence describing tempo, either in beats per minute (bpm), or in musical words, e.g., Adagio, Moderato, Presto.\n8. prompt_key – A control sentence related to the extracted musical key.\n9. beats – The beat and downbeat timestamps. This is used as an input for training Mustango.\n10. bpm – The tempo feature saved as a number.\n11. chords – The chord sequence contained in the track. This is used as an input for training Mustango.\n12. chords_time – Timestamps of the detected chords. This is used as an input for training Mustango.\n13. key – The root and the type of the detected key.\n14. keyprob – The confidence score for this detected key provided by the detection algorithm.",
"# FMACaps Evaluation Dataset\nHereby, we also present you the FMACaps evaluation dataset which consists of 1000 samples extracted from the Free Music Archive (FMA) and pseudocaptioned through extracting tags from audio and then utilizing ChatGPT in-context learning. More information is available in our paper!\n\nMost of the samples are 10 second long, exceptions are between 5 to 10 seconds long.\n\nData size: 1,000 samples\nSampling rate: 16 kHz\n\nFiles included:\n1. 1,000 audio files in the \"audiodata\" folder\n2. FMACaps_A – this file contains captions with NO control sentences.\n3. FMACaps_B – this file contains captions with ALL control sentences. We used this file the our controllability evaluation of Mustango.\n4. FMACaps_C – this file contains captions with SOME controls sentences. For each sample, we chose 0/1/2/3/4 control sentences with a probability of 25/30/20/15/10 %, as described in our paper. This file was used to objectively evaluate audio quality of Mustango.\n\nThe structure of each .json file is identical to MusicBench, as described in the previous section, with the exception of \"alt_caption\" column being empty. All captions are in the \"main_caption\" column!",
"## Links\n\n- Code Repository: [URL\n- Paper: [URL\n- Demo: [URL\n- Website: [URL\n\n\nLicense: cc-by-sa-3.0"
]
| [
"TAGS\n#license-cc-by-sa-3.0 #arxiv-2311.08355 #region-us \n",
"# MusicBench Dataset\n\nThe MusicBench dataset is a music audio-text pair dataset that was designed for text-to-music generation purpose and released along with Mustango text-to-music model. MusicBench is based on the MusicCaps dataset, which it expands from 5,521 samples to 52,768 training and 400 test samples!",
"## Dataset Details\nMusicBench expands MusicCaps by:\n1. Including music features of chords, beats, tempo, and key that are extracted from the audio.\n2. Describing these music features using text templates and thus enhancing the original text prompts.\n3. Expanding the number of audio samples by performing musically meaningful augmentations: semitone pitch shifts, tempo changes, and volume changes.\n\nTrain set size = 52,768 samples\nTest set size = 400",
"### Dataset Description\nMusicBench consists of 3 .json files and attached audio files in .URL form.\n\nThe train set contains audio augmented samples and enhanced captions. Additionally, it offers ChatGPT rephrased captions for all the audio samples.\nBoth TestA and TestB sets contain the same audio content, but TestB has all 4 possible control sentences (related to 4 music features) in captions of all samples, while TestA has no control sentences in the captions.\n\nFor more details, see Figure 1 in our paper.\n\n\nEach row of a .json file has:\n1. location (of the files after decompressing the .URL file)\n2. main_caption – text prompts that are a result of augmentation (TestB contains control sentences, train set contains ChatGPT rephrased captions here)\n3. alt_caption – in the case of TestB these are captions without any control sentences added.\n4. prompt_aug – A control sentence related to volume change augmentation.\n5. prompt_ch – A control sentence describing the chord sequence.\n6. prompt_bt – A control sentence describing the beat count (meter)\n7. prompt_bpm – A control sentence describing tempo, either in beats per minute (bpm), or in musical words, e.g., Adagio, Moderato, Presto.\n8. prompt_key – A control sentence related to the extracted musical key.\n9. beats – The beat and downbeat timestamps. This is used as an input for training Mustango.\n10. bpm – The tempo feature saved as a number.\n11. chords – The chord sequence contained in the track. This is used as an input for training Mustango.\n12. chords_time – Timestamps of the detected chords. This is used as an input for training Mustango.\n13. key – The root and the type of the detected key.\n14. keyprob – The confidence score for this detected key provided by the detection algorithm.",
"# FMACaps Evaluation Dataset\nHereby, we also present you the FMACaps evaluation dataset which consists of 1000 samples extracted from the Free Music Archive (FMA) and pseudocaptioned through extracting tags from audio and then utilizing ChatGPT in-context learning. More information is available in our paper!\n\nMost of the samples are 10 second long, exceptions are between 5 to 10 seconds long.\n\nData size: 1,000 samples\nSampling rate: 16 kHz\n\nFiles included:\n1. 1,000 audio files in the \"audiodata\" folder\n2. FMACaps_A – this file contains captions with NO control sentences.\n3. FMACaps_B – this file contains captions with ALL control sentences. We used this file the our controllability evaluation of Mustango.\n4. FMACaps_C – this file contains captions with SOME controls sentences. For each sample, we chose 0/1/2/3/4 control sentences with a probability of 25/30/20/15/10 %, as described in our paper. This file was used to objectively evaluate audio quality of Mustango.\n\nThe structure of each .json file is identical to MusicBench, as described in the previous section, with the exception of \"alt_caption\" column being empty. All captions are in the \"main_caption\" column!",
"## Links\n\n- Code Repository: [URL\n- Paper: [URL\n- Demo: [URL\n- Website: [URL\n\n\nLicense: cc-by-sa-3.0"
]
| [
26,
78,
110,
451,
302,
35
]
| [
"passage: TAGS\n#license-cc-by-sa-3.0 #arxiv-2311.08355 #region-us \n# MusicBench Dataset\n\nThe MusicBench dataset is a music audio-text pair dataset that was designed for text-to-music generation purpose and released along with Mustango text-to-music model. MusicBench is based on the MusicCaps dataset, which it expands from 5,521 samples to 52,768 training and 400 test samples!## Dataset Details\nMusicBench expands MusicCaps by:\n1. Including music features of chords, beats, tempo, and key that are extracted from the audio.\n2. Describing these music features using text templates and thus enhancing the original text prompts.\n3. Expanding the number of audio samples by performing musically meaningful augmentations: semitone pitch shifts, tempo changes, and volume changes.\n\nTrain set size = 52,768 samples\nTest set size = 400"
]
|
9f454a240fea1bf1545b6bf75fd30f10ac1cf60f | # Dataset Card for "dumb_whole"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | jlbaker361/dumb_whole | [
"region:us"
]
| 2023-11-15T03:13:54+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "float64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 225.0, "num_examples": 9}, {"name": "test", "num_bytes": 25, "num_examples": 1}], "download_size": 3294, "dataset_size": 250.0}} | 2023-11-17T05:53:47+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "dumb_whole"
More Information needed | [
"# Dataset Card for \"dumb_whole\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"dumb_whole\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"dumb_whole\"\n\nMore Information needed"
]
|
f56a3aff42dec024a2abffa1b7d087745e4054cd | # Tony Chase Transcripts
Around 3500 transcripts of videos from [Tony Chase](https://www.youtube.com/@tonychaseiamnotgaynotnever2043) captioned with GPT-3.5-Turbo. | v2ray/Tony-Chase-Transcripts | [
"task_categories:text-generation",
"size_categories:1K<n<10K",
"language:en",
"license:mit",
"not-for-all-audiences",
"region:us"
]
| 2023-11-15T03:26:39+00:00 | {"language": ["en"], "license": "mit", "size_categories": ["1K<n<10K"], "task_categories": ["text-generation"], "tags": ["not-for-all-audiences"]} | 2023-12-08T15:53:11+00:00 | []
| [
"en"
]
| TAGS
#task_categories-text-generation #size_categories-1K<n<10K #language-English #license-mit #not-for-all-audiences #region-us
| # Tony Chase Transcripts
Around 3500 transcripts of videos from Tony Chase captioned with GPT-3.5-Turbo. | [
"# Tony Chase Transcripts\nAround 3500 transcripts of videos from Tony Chase captioned with GPT-3.5-Turbo."
]
| [
"TAGS\n#task_categories-text-generation #size_categories-1K<n<10K #language-English #license-mit #not-for-all-audiences #region-us \n",
"# Tony Chase Transcripts\nAround 3500 transcripts of videos from Tony Chase captioned with GPT-3.5-Turbo."
]
| [
47,
30
]
| [
"passage: TAGS\n#task_categories-text-generation #size_categories-1K<n<10K #language-English #license-mit #not-for-all-audiences #region-us \n# Tony Chase Transcripts\nAround 3500 transcripts of videos from Tony Chase captioned with GPT-3.5-Turbo."
]
|
21c94ae389238becad55271b69426ac99168db1e | # Dataset Card for "rap-lyrics-v2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | nateraw/rap-lyrics-v2 | [
"region:us"
]
| 2023-11-15T03:32:13+00:00 | {"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "num_tokens", "dtype": "int64"}, {"name": "completion", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4236119, "num_examples": 7319}], "download_size": 2186104, "dataset_size": 4236119}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T03:32:19+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "rap-lyrics-v2"
More Information needed | [
"# Dataset Card for \"rap-lyrics-v2\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"rap-lyrics-v2\"\n\nMore Information needed"
]
| [
6,
18
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"rap-lyrics-v2\"\n\nMore Information needed"
]
|
92ccb427a0ab15f5495a8146a489e9ac8156fdb0 |
# Dataset Card for Evaluation run of lgaalves/gpt2-xl_lima
## Dataset Description
- **Homepage:**
- **Repository:** https://huggingface.co/lgaalves/gpt2-xl_lima
- **Paper:**
- **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard
- **Point of Contact:** [email protected]
### Dataset Summary
Dataset automatically created during the evaluation run of model [lgaalves/gpt2-xl_lima](https://huggingface.co/lgaalves/gpt2-xl_lima) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).
The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks.
The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.
An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).
To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset("open-llm-leaderboard/details_lgaalves__gpt2-xl_lima_public",
"harness_winogrande_5",
split="train")
```
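If you want to enumerate the 64 per-task configurations before picking one, the `datasets` library can list them (an optional helper, not part of the original card):

```python
from datasets import get_dataset_config_names

configs = get_dataset_config_names(
    "open-llm-leaderboard/details_lgaalves__gpt2-xl_lima_public"
)
print(len(configs), configs[:3])  # e.g. "harness_winogrande_5", ..., plus "results"
```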
## Latest results
These are the [latest results from run 2023-11-15T03:46:31.104311](https://huggingface.co/datasets/open-llm-leaderboard/details_lgaalves__gpt2-xl_lima_public/blob/main/results_2023-11-15T03-46-31.104311.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each one in the results and in the "latest" split for each eval):
```python
{
"all": {
"acc": 0.2579848503192349,
"acc_stderr": 0.030758432385023834,
"acc_norm": 0.25961199994409145,
"acc_norm_stderr": 0.03153372055003476,
"mc1": 0.2252141982864137,
"mc1_stderr": 0.014623240768023507,
"mc2": 0.3874325444900457,
"mc2_stderr": 0.014089660369122726,
"em": 0.002726510067114094,
"em_stderr": 0.0005340111700415908,
"f1": 0.04890100671140956,
"f1_stderr": 0.0013085576550093093
},
"harness|arc:challenge|25": {
"acc": 0.2645051194539249,
"acc_stderr": 0.012889272949313368,
"acc_norm": 0.31143344709897613,
"acc_norm_stderr": 0.013532472099850949
},
"harness|hellaswag|10": {
"acc": 0.39842660824536946,
"acc_stderr": 0.004885735963346903,
"acc_norm": 0.5128460466042621,
"acc_norm_stderr": 0.004988134303021793
},
"harness|hendrycksTest-abstract_algebra|5": {
"acc": 0.26,
"acc_stderr": 0.04408440022768081,
"acc_norm": 0.26,
"acc_norm_stderr": 0.04408440022768081
},
"harness|hendrycksTest-anatomy|5": {
"acc": 0.24444444444444444,
"acc_stderr": 0.03712537833614865,
"acc_norm": 0.24444444444444444,
"acc_norm_stderr": 0.03712537833614865
},
"harness|hendrycksTest-astronomy|5": {
"acc": 0.2236842105263158,
"acc_stderr": 0.033911609343436025,
"acc_norm": 0.2236842105263158,
"acc_norm_stderr": 0.033911609343436025
},
"harness|hendrycksTest-business_ethics|5": {
"acc": 0.22,
"acc_stderr": 0.04163331998932269,
"acc_norm": 0.22,
"acc_norm_stderr": 0.04163331998932269
},
"harness|hendrycksTest-clinical_knowledge|5": {
"acc": 0.30566037735849055,
"acc_stderr": 0.028353298073322666,
"acc_norm": 0.30566037735849055,
"acc_norm_stderr": 0.028353298073322666
},
"harness|hendrycksTest-college_biology|5": {
"acc": 0.2777777777777778,
"acc_stderr": 0.037455547914624576,
"acc_norm": 0.2777777777777778,
"acc_norm_stderr": 0.037455547914624576
},
"harness|hendrycksTest-college_chemistry|5": {
"acc": 0.22,
"acc_stderr": 0.041633319989322695,
"acc_norm": 0.22,
"acc_norm_stderr": 0.041633319989322695
},
"harness|hendrycksTest-college_computer_science|5": {
"acc": 0.3,
"acc_stderr": 0.046056618647183814,
"acc_norm": 0.3,
"acc_norm_stderr": 0.046056618647183814
},
"harness|hendrycksTest-college_mathematics|5": {
"acc": 0.32,
"acc_stderr": 0.04688261722621503,
"acc_norm": 0.32,
"acc_norm_stderr": 0.04688261722621503
},
"harness|hendrycksTest-college_medicine|5": {
"acc": 0.3352601156069364,
"acc_stderr": 0.03599586301247078,
"acc_norm": 0.3352601156069364,
"acc_norm_stderr": 0.03599586301247078
},
"harness|hendrycksTest-college_physics|5": {
"acc": 0.13725490196078433,
"acc_stderr": 0.03424084669891522,
"acc_norm": 0.13725490196078433,
"acc_norm_stderr": 0.03424084669891522
},
"harness|hendrycksTest-computer_security|5": {
"acc": 0.26,
"acc_stderr": 0.0440844002276808,
"acc_norm": 0.26,
"acc_norm_stderr": 0.0440844002276808
},
"harness|hendrycksTest-conceptual_physics|5": {
"acc": 0.251063829787234,
"acc_stderr": 0.028346963777162445,
"acc_norm": 0.251063829787234,
"acc_norm_stderr": 0.028346963777162445
},
"harness|hendrycksTest-econometrics|5": {
"acc": 0.23684210526315788,
"acc_stderr": 0.039994238792813344,
"acc_norm": 0.23684210526315788,
"acc_norm_stderr": 0.039994238792813344
},
"harness|hendrycksTest-electrical_engineering|5": {
"acc": 0.2827586206896552,
"acc_stderr": 0.03752833958003336,
"acc_norm": 0.2827586206896552,
"acc_norm_stderr": 0.03752833958003336
},
"harness|hendrycksTest-elementary_mathematics|5": {
"acc": 0.20105820105820105,
"acc_stderr": 0.020641810782370165,
"acc_norm": 0.20105820105820105,
"acc_norm_stderr": 0.020641810782370165
},
"harness|hendrycksTest-formal_logic|5": {
"acc": 0.30158730158730157,
"acc_stderr": 0.041049472699033945,
"acc_norm": 0.30158730158730157,
"acc_norm_stderr": 0.041049472699033945
},
"harness|hendrycksTest-global_facts|5": {
"acc": 0.17,
"acc_stderr": 0.0377525168068637,
"acc_norm": 0.17,
"acc_norm_stderr": 0.0377525168068637
},
"harness|hendrycksTest-high_school_biology|5": {
"acc": 0.20967741935483872,
"acc_stderr": 0.02315787934908352,
"acc_norm": 0.20967741935483872,
"acc_norm_stderr": 0.02315787934908352
},
"harness|hendrycksTest-high_school_chemistry|5": {
"acc": 0.15763546798029557,
"acc_stderr": 0.025639014131172408,
"acc_norm": 0.15763546798029557,
"acc_norm_stderr": 0.025639014131172408
},
"harness|hendrycksTest-high_school_computer_science|5": {
"acc": 0.29,
"acc_stderr": 0.04560480215720684,
"acc_norm": 0.29,
"acc_norm_stderr": 0.04560480215720684
},
"harness|hendrycksTest-high_school_european_history|5": {
"acc": 0.3151515151515151,
"acc_stderr": 0.0362773057502241,
"acc_norm": 0.3151515151515151,
"acc_norm_stderr": 0.0362773057502241
},
"harness|hendrycksTest-high_school_geography|5": {
"acc": 0.3383838383838384,
"acc_stderr": 0.03371124142626302,
"acc_norm": 0.3383838383838384,
"acc_norm_stderr": 0.03371124142626302
},
"harness|hendrycksTest-high_school_government_and_politics|5": {
"acc": 0.22797927461139897,
"acc_stderr": 0.03027690994517825,
"acc_norm": 0.22797927461139897,
"acc_norm_stderr": 0.03027690994517825
},
"harness|hendrycksTest-high_school_macroeconomics|5": {
"acc": 0.3564102564102564,
"acc_stderr": 0.024283140529467295,
"acc_norm": 0.3564102564102564,
"acc_norm_stderr": 0.024283140529467295
},
"harness|hendrycksTest-high_school_mathematics|5": {
"acc": 0.2111111111111111,
"acc_stderr": 0.024882116857655078,
"acc_norm": 0.2111111111111111,
"acc_norm_stderr": 0.024882116857655078
},
"harness|hendrycksTest-high_school_microeconomics|5": {
"acc": 0.226890756302521,
"acc_stderr": 0.02720537153827948,
"acc_norm": 0.226890756302521,
"acc_norm_stderr": 0.02720537153827948
},
"harness|hendrycksTest-high_school_physics|5": {
"acc": 0.2980132450331126,
"acc_stderr": 0.03734535676787198,
"acc_norm": 0.2980132450331126,
"acc_norm_stderr": 0.03734535676787198
},
"harness|hendrycksTest-high_school_psychology|5": {
"acc": 0.3522935779816514,
"acc_stderr": 0.020480568843999,
"acc_norm": 0.3522935779816514,
"acc_norm_stderr": 0.020480568843999
},
"harness|hendrycksTest-high_school_statistics|5": {
"acc": 0.4537037037037037,
"acc_stderr": 0.033953227263757976,
"acc_norm": 0.4537037037037037,
"acc_norm_stderr": 0.033953227263757976
},
"harness|hendrycksTest-high_school_us_history|5": {
"acc": 0.20098039215686275,
"acc_stderr": 0.028125972265654373,
"acc_norm": 0.20098039215686275,
"acc_norm_stderr": 0.028125972265654373
},
"harness|hendrycksTest-high_school_world_history|5": {
"acc": 0.20675105485232068,
"acc_stderr": 0.026361651668389094,
"acc_norm": 0.20675105485232068,
"acc_norm_stderr": 0.026361651668389094
},
"harness|hendrycksTest-human_aging|5": {
"acc": 0.14349775784753363,
"acc_stderr": 0.0235293712696182,
"acc_norm": 0.14349775784753363,
"acc_norm_stderr": 0.0235293712696182
},
"harness|hendrycksTest-human_sexuality|5": {
"acc": 0.24427480916030533,
"acc_stderr": 0.037683359597287434,
"acc_norm": 0.24427480916030533,
"acc_norm_stderr": 0.037683359597287434
},
"harness|hendrycksTest-international_law|5": {
"acc": 0.15702479338842976,
"acc_stderr": 0.0332124484254713,
"acc_norm": 0.15702479338842976,
"acc_norm_stderr": 0.0332124484254713
},
"harness|hendrycksTest-jurisprudence|5": {
"acc": 0.25925925925925924,
"acc_stderr": 0.042365112580946336,
"acc_norm": 0.25925925925925924,
"acc_norm_stderr": 0.042365112580946336
},
"harness|hendrycksTest-logical_fallacies|5": {
"acc": 0.27607361963190186,
"acc_stderr": 0.0351238528370505,
"acc_norm": 0.27607361963190186,
"acc_norm_stderr": 0.0351238528370505
},
"harness|hendrycksTest-machine_learning|5": {
"acc": 0.2857142857142857,
"acc_stderr": 0.042878587513404565,
"acc_norm": 0.2857142857142857,
"acc_norm_stderr": 0.042878587513404565
},
"harness|hendrycksTest-management|5": {
"acc": 0.27184466019417475,
"acc_stderr": 0.044052680241409216,
"acc_norm": 0.27184466019417475,
"acc_norm_stderr": 0.044052680241409216
},
"harness|hendrycksTest-marketing|5": {
"acc": 0.23931623931623933,
"acc_stderr": 0.02795182680892433,
"acc_norm": 0.23931623931623933,
"acc_norm_stderr": 0.02795182680892433
},
"harness|hendrycksTest-medical_genetics|5": {
"acc": 0.3,
"acc_stderr": 0.046056618647183814,
"acc_norm": 0.3,
"acc_norm_stderr": 0.046056618647183814
},
"harness|hendrycksTest-miscellaneous|5": {
"acc": 0.26181353767560667,
"acc_stderr": 0.015720838678445256,
"acc_norm": 0.26181353767560667,
"acc_norm_stderr": 0.015720838678445256
},
"harness|hendrycksTest-moral_disputes|5": {
"acc": 0.24855491329479767,
"acc_stderr": 0.023267528432100174,
"acc_norm": 0.24855491329479767,
"acc_norm_stderr": 0.023267528432100174
},
"harness|hendrycksTest-moral_scenarios|5": {
"acc": 0.2424581005586592,
"acc_stderr": 0.014333522059217889,
"acc_norm": 0.2424581005586592,
"acc_norm_stderr": 0.014333522059217889
},
"harness|hendrycksTest-nutrition|5": {
"acc": 0.22549019607843138,
"acc_stderr": 0.023929155517351294,
"acc_norm": 0.22549019607843138,
"acc_norm_stderr": 0.023929155517351294
},
"harness|hendrycksTest-philosophy|5": {
"acc": 0.2057877813504823,
"acc_stderr": 0.022961339906764244,
"acc_norm": 0.2057877813504823,
"acc_norm_stderr": 0.022961339906764244
},
"harness|hendrycksTest-prehistory|5": {
"acc": 0.25617283950617287,
"acc_stderr": 0.0242885336377261,
"acc_norm": 0.25617283950617287,
"acc_norm_stderr": 0.0242885336377261
},
"harness|hendrycksTest-professional_accounting|5": {
"acc": 0.23049645390070922,
"acc_stderr": 0.02512373922687241,
"acc_norm": 0.23049645390070922,
"acc_norm_stderr": 0.02512373922687241
},
"harness|hendrycksTest-professional_law|5": {
"acc": 0.24119947848761408,
"acc_stderr": 0.010926496102034956,
"acc_norm": 0.24119947848761408,
"acc_norm_stderr": 0.010926496102034956
},
"harness|hendrycksTest-professional_medicine|5": {
"acc": 0.19852941176470587,
"acc_stderr": 0.024231013370541107,
"acc_norm": 0.19852941176470587,
"acc_norm_stderr": 0.024231013370541107
},
"harness|hendrycksTest-professional_psychology|5": {
"acc": 0.2434640522875817,
"acc_stderr": 0.017362473762146634,
"acc_norm": 0.2434640522875817,
"acc_norm_stderr": 0.017362473762146634
},
"harness|hendrycksTest-public_relations|5": {
"acc": 0.2727272727272727,
"acc_stderr": 0.04265792110940588,
"acc_norm": 0.2727272727272727,
"acc_norm_stderr": 0.04265792110940588
},
"harness|hendrycksTest-security_studies|5": {
"acc": 0.2163265306122449,
"acc_stderr": 0.02635891633490403,
"acc_norm": 0.2163265306122449,
"acc_norm_stderr": 0.02635891633490403
},
"harness|hendrycksTest-sociology|5": {
"acc": 0.2537313432835821,
"acc_stderr": 0.03076944496729602,
"acc_norm": 0.2537313432835821,
"acc_norm_stderr": 0.03076944496729602
},
"harness|hendrycksTest-us_foreign_policy|5": {
"acc": 0.24,
"acc_stderr": 0.04292346959909282,
"acc_norm": 0.24,
"acc_norm_stderr": 0.04292346959909282
},
"harness|hendrycksTest-virology|5": {
"acc": 0.26506024096385544,
"acc_stderr": 0.03436024037944967,
"acc_norm": 0.26506024096385544,
"acc_norm_stderr": 0.03436024037944967
},
"harness|hendrycksTest-world_religions|5": {
"acc": 0.3216374269005848,
"acc_stderr": 0.03582529442573122,
"acc_norm": 0.3216374269005848,
"acc_norm_stderr": 0.03582529442573122
},
"harness|truthfulqa:mc|0": {
"mc1": 0.2252141982864137,
"mc1_stderr": 0.014623240768023507,
"mc2": 0.3874325444900457,
"mc2_stderr": 0.014089660369122726
},
"harness|winogrande|5": {
"acc": 0.5722178374112076,
"acc_stderr": 0.013905134013839943
},
"harness|drop|3": {
"em": 0.002726510067114094,
"em_stderr": 0.0005340111700415908,
"f1": 0.04890100671140956,
"f1_stderr": 0.0013085576550093093
},
"harness|gsm8k|5": {
"acc": 0.009097801364670205,
"acc_stderr": 0.002615326510775673
}
}
```
### Supported Tasks and Leaderboards
[More Information Needed]
### Languages
[More Information Needed]
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
[More Information Needed]
### Contributions
[More Information Needed] | open-llm-leaderboard/details_lgaalves__gpt2-xl_lima | [
"region:us"
]
| 2023-11-15T03:48:13+00:00 | {"pretty_name": "Evaluation run of lgaalves/gpt2-xl_lima", "dataset_summary": "Dataset automatically created during the evaluation run of model [lgaalves/gpt2-xl_lima](https://huggingface.co/lgaalves/gpt2-xl_lima) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_lgaalves__gpt2-xl_lima_public\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-11-15T03:46:31.104311](https://huggingface.co/datasets/open-llm-leaderboard/details_lgaalves__gpt2-xl_lima_public/blob/main/results_2023-11-15T03-46-31.104311.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.2579848503192349,\n \"acc_stderr\": 0.030758432385023834,\n \"acc_norm\": 0.25961199994409145,\n \"acc_norm_stderr\": 0.03153372055003476,\n \"mc1\": 0.2252141982864137,\n \"mc1_stderr\": 0.014623240768023507,\n \"mc2\": 0.3874325444900457,\n \"mc2_stderr\": 0.014089660369122726,\n \"em\": 0.002726510067114094,\n \"em_stderr\": 0.0005340111700415908,\n \"f1\": 0.04890100671140956,\n \"f1_stderr\": 0.0013085576550093093\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.2645051194539249,\n \"acc_stderr\": 0.012889272949313368,\n \"acc_norm\": 0.31143344709897613,\n \"acc_norm_stderr\": 0.013532472099850949\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.39842660824536946,\n \"acc_stderr\": 0.004885735963346903,\n \"acc_norm\": 0.5128460466042621,\n \"acc_norm_stderr\": 0.004988134303021793\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.26,\n \"acc_stderr\": 0.04408440022768081,\n \"acc_norm\": 0.26,\n \"acc_norm_stderr\": 0.04408440022768081\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.24444444444444444,\n \"acc_stderr\": 0.03712537833614865,\n \"acc_norm\": 0.24444444444444444,\n \"acc_norm_stderr\": 0.03712537833614865\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.2236842105263158,\n \"acc_stderr\": 0.033911609343436025,\n \"acc_norm\": 0.2236842105263158,\n \"acc_norm_stderr\": 0.033911609343436025\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.22,\n \"acc_stderr\": 0.04163331998932269,\n \"acc_norm\": 0.22,\n \"acc_norm_stderr\": 0.04163331998932269\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.30566037735849055,\n \"acc_stderr\": 0.028353298073322666,\n \"acc_norm\": 0.30566037735849055,\n \"acc_norm_stderr\": 0.028353298073322666\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.2777777777777778,\n \"acc_stderr\": 0.037455547914624576,\n \"acc_norm\": 0.2777777777777778,\n 
\"acc_norm_stderr\": 0.037455547914624576\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.22,\n \"acc_stderr\": 0.041633319989322695,\n \"acc_norm\": 0.22,\n \"acc_norm_stderr\": 0.041633319989322695\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.3,\n \"acc_stderr\": 0.046056618647183814,\n \"acc_norm\": 0.3,\n \"acc_norm_stderr\": 0.046056618647183814\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.32,\n \"acc_stderr\": 0.04688261722621503,\n \"acc_norm\": 0.32,\n \"acc_norm_stderr\": 0.04688261722621503\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.3352601156069364,\n \"acc_stderr\": 0.03599586301247078,\n \"acc_norm\": 0.3352601156069364,\n \"acc_norm_stderr\": 0.03599586301247078\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.13725490196078433,\n \"acc_stderr\": 0.03424084669891522,\n \"acc_norm\": 0.13725490196078433,\n \"acc_norm_stderr\": 0.03424084669891522\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.26,\n \"acc_stderr\": 0.0440844002276808,\n \"acc_norm\": 0.26,\n \"acc_norm_stderr\": 0.0440844002276808\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.251063829787234,\n \"acc_stderr\": 0.028346963777162445,\n \"acc_norm\": 0.251063829787234,\n \"acc_norm_stderr\": 0.028346963777162445\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.23684210526315788,\n \"acc_stderr\": 0.039994238792813344,\n \"acc_norm\": 0.23684210526315788,\n \"acc_norm_stderr\": 0.039994238792813344\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.2827586206896552,\n \"acc_stderr\": 0.03752833958003336,\n \"acc_norm\": 0.2827586206896552,\n \"acc_norm_stderr\": 0.03752833958003336\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.20105820105820105,\n \"acc_stderr\": 0.020641810782370165,\n \"acc_norm\": 0.20105820105820105,\n \"acc_norm_stderr\": 0.020641810782370165\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.30158730158730157,\n \"acc_stderr\": 0.041049472699033945,\n \"acc_norm\": 0.30158730158730157,\n \"acc_norm_stderr\": 0.041049472699033945\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.17,\n \"acc_stderr\": 0.0377525168068637,\n \"acc_norm\": 0.17,\n \"acc_norm_stderr\": 0.0377525168068637\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.20967741935483872,\n \"acc_stderr\": 0.02315787934908352,\n \"acc_norm\": 0.20967741935483872,\n \"acc_norm_stderr\": 0.02315787934908352\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.15763546798029557,\n \"acc_stderr\": 0.025639014131172408,\n \"acc_norm\": 0.15763546798029557,\n \"acc_norm_stderr\": 0.025639014131172408\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.29,\n \"acc_stderr\": 0.04560480215720684,\n \"acc_norm\": 0.29,\n \"acc_norm_stderr\": 0.04560480215720684\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.3151515151515151,\n \"acc_stderr\": 0.0362773057502241,\n \"acc_norm\": 0.3151515151515151,\n \"acc_norm_stderr\": 0.0362773057502241\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.3383838383838384,\n \"acc_stderr\": 0.03371124142626302,\n \"acc_norm\": 0.3383838383838384,\n \"acc_norm_stderr\": 0.03371124142626302\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.22797927461139897,\n \"acc_stderr\": 
0.03027690994517825,\n \"acc_norm\": 0.22797927461139897,\n \"acc_norm_stderr\": 0.03027690994517825\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.3564102564102564,\n \"acc_stderr\": 0.024283140529467295,\n \"acc_norm\": 0.3564102564102564,\n \"acc_norm_stderr\": 0.024283140529467295\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.2111111111111111,\n \"acc_stderr\": 0.024882116857655078,\n \"acc_norm\": 0.2111111111111111,\n \"acc_norm_stderr\": 0.024882116857655078\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.226890756302521,\n \"acc_stderr\": 0.02720537153827948,\n \"acc_norm\": 0.226890756302521,\n \"acc_norm_stderr\": 0.02720537153827948\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.2980132450331126,\n \"acc_stderr\": 0.03734535676787198,\n \"acc_norm\": 0.2980132450331126,\n \"acc_norm_stderr\": 0.03734535676787198\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.3522935779816514,\n \"acc_stderr\": 0.020480568843999,\n \"acc_norm\": 0.3522935779816514,\n \"acc_norm_stderr\": 0.020480568843999\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.4537037037037037,\n \"acc_stderr\": 0.033953227263757976,\n \"acc_norm\": 0.4537037037037037,\n \"acc_norm_stderr\": 0.033953227263757976\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.20098039215686275,\n \"acc_stderr\": 0.028125972265654373,\n \"acc_norm\": 0.20098039215686275,\n \"acc_norm_stderr\": 0.028125972265654373\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.20675105485232068,\n \"acc_stderr\": 0.026361651668389094,\n \"acc_norm\": 0.20675105485232068,\n \"acc_norm_stderr\": 0.026361651668389094\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.14349775784753363,\n \"acc_stderr\": 0.0235293712696182,\n \"acc_norm\": 0.14349775784753363,\n \"acc_norm_stderr\": 0.0235293712696182\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.24427480916030533,\n \"acc_stderr\": 0.037683359597287434,\n \"acc_norm\": 0.24427480916030533,\n \"acc_norm_stderr\": 0.037683359597287434\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.15702479338842976,\n \"acc_stderr\": 0.0332124484254713,\n \"acc_norm\": 0.15702479338842976,\n \"acc_norm_stderr\": 0.0332124484254713\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.25925925925925924,\n \"acc_stderr\": 0.042365112580946336,\n \"acc_norm\": 0.25925925925925924,\n \"acc_norm_stderr\": 0.042365112580946336\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.27607361963190186,\n \"acc_stderr\": 0.0351238528370505,\n \"acc_norm\": 0.27607361963190186,\n \"acc_norm_stderr\": 0.0351238528370505\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.2857142857142857,\n \"acc_stderr\": 0.042878587513404565,\n \"acc_norm\": 0.2857142857142857,\n \"acc_norm_stderr\": 0.042878587513404565\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.27184466019417475,\n \"acc_stderr\": 0.044052680241409216,\n \"acc_norm\": 0.27184466019417475,\n \"acc_norm_stderr\": 0.044052680241409216\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.23931623931623933,\n \"acc_stderr\": 0.02795182680892433,\n \"acc_norm\": 0.23931623931623933,\n \"acc_norm_stderr\": 0.02795182680892433\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.3,\n \"acc_stderr\": 0.046056618647183814,\n \"acc_norm\": 
0.3,\n \"acc_norm_stderr\": 0.046056618647183814\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.26181353767560667,\n \"acc_stderr\": 0.015720838678445256,\n \"acc_norm\": 0.26181353767560667,\n \"acc_norm_stderr\": 0.015720838678445256\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.24855491329479767,\n \"acc_stderr\": 0.023267528432100174,\n \"acc_norm\": 0.24855491329479767,\n \"acc_norm_stderr\": 0.023267528432100174\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.2424581005586592,\n \"acc_stderr\": 0.014333522059217889,\n \"acc_norm\": 0.2424581005586592,\n \"acc_norm_stderr\": 0.014333522059217889\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.22549019607843138,\n \"acc_stderr\": 0.023929155517351294,\n \"acc_norm\": 0.22549019607843138,\n \"acc_norm_stderr\": 0.023929155517351294\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.2057877813504823,\n \"acc_stderr\": 0.022961339906764244,\n \"acc_norm\": 0.2057877813504823,\n \"acc_norm_stderr\": 0.022961339906764244\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.25617283950617287,\n \"acc_stderr\": 0.0242885336377261,\n \"acc_norm\": 0.25617283950617287,\n \"acc_norm_stderr\": 0.0242885336377261\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.23049645390070922,\n \"acc_stderr\": 0.02512373922687241,\n \"acc_norm\": 0.23049645390070922,\n \"acc_norm_stderr\": 0.02512373922687241\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.24119947848761408,\n \"acc_stderr\": 0.010926496102034956,\n \"acc_norm\": 0.24119947848761408,\n \"acc_norm_stderr\": 0.010926496102034956\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.19852941176470587,\n \"acc_stderr\": 0.024231013370541107,\n \"acc_norm\": 0.19852941176470587,\n \"acc_norm_stderr\": 0.024231013370541107\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.2434640522875817,\n \"acc_stderr\": 0.017362473762146634,\n \"acc_norm\": 0.2434640522875817,\n \"acc_norm_stderr\": 0.017362473762146634\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.2727272727272727,\n \"acc_stderr\": 0.04265792110940588,\n \"acc_norm\": 0.2727272727272727,\n \"acc_norm_stderr\": 0.04265792110940588\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.2163265306122449,\n \"acc_stderr\": 0.02635891633490403,\n \"acc_norm\": 0.2163265306122449,\n \"acc_norm_stderr\": 0.02635891633490403\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.2537313432835821,\n \"acc_stderr\": 0.03076944496729602,\n \"acc_norm\": 0.2537313432835821,\n \"acc_norm_stderr\": 0.03076944496729602\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.24,\n \"acc_stderr\": 0.04292346959909282,\n \"acc_norm\": 0.24,\n \"acc_norm_stderr\": 0.04292346959909282\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.26506024096385544,\n \"acc_stderr\": 0.03436024037944967,\n \"acc_norm\": 0.26506024096385544,\n \"acc_norm_stderr\": 0.03436024037944967\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.3216374269005848,\n \"acc_stderr\": 0.03582529442573122,\n \"acc_norm\": 0.3216374269005848,\n \"acc_norm_stderr\": 0.03582529442573122\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.2252141982864137,\n \"mc1_stderr\": 0.014623240768023507,\n \"mc2\": 0.3874325444900457,\n \"mc2_stderr\": 0.014089660369122726\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.5722178374112076,\n 
\"acc_stderr\": 0.013905134013839943\n },\n \"harness|drop|3\": {\n \"em\": 0.002726510067114094,\n \"em_stderr\": 0.0005340111700415908,\n \"f1\": 0.04890100671140956,\n \"f1_stderr\": 0.0013085576550093093\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.009097801364670205,\n \"acc_stderr\": 0.002615326510775673\n }\n}\n```", "repo_url": "https://huggingface.co/lgaalves/gpt2-xl_lima", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|arc:challenge|25_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_drop_3", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|drop|3_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|gsm8k|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hellaswag|10_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-15T03-46-31.104311.parquet", 
"**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-management|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-virology|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-15T03-46-31.104311.parquet", 
"**/details_harness|hendrycksTest-anatomy|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-management|5_2023-11-15T03-46-31.104311.parquet", 
"**/details_harness|hendrycksTest-marketing|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-virology|5_2023-11-15T03-46-31.104311.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-college_biology|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-15T03-46-31.104311.parquet"]}]}, 
{"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-jurisprudence|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-management|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": 
["**/details_harness|hendrycksTest-prehistory|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-virology|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-world_religions|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|truthfulqa:mc|0_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["**/details_harness|winogrande|5_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-11-15T03-46-31.104311.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_11_15T03_46_31.104311", "path": ["results_2023-11-15T03-46-31.104311.parquet"]}, {"split": "latest", "path": ["results_2023-11-15T03-46-31.104311.parquet"]}]}]} | 2023-11-15T03:48:58+00:00 | []
| []
| TAGS
#region-us
|
# Dataset Card for Evaluation run of lgaalves/gpt2-xl_lima
## Dataset Description
- Homepage:
- Repository: URL
- Paper:
- Leaderboard: URL
- Point of Contact: clementine@URL
### Dataset Summary
Dataset automatically created during the evaluation run of model lgaalves/gpt2-xl_lima on the Open LLM Leaderboard.
The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks.
The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.
An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).
To load the details from a run, you can for instance do the following:
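```python
from datasets import load_dataset

# Pull the details for one task (here, the 5-shot winogrande run) from this repo.
data = load_dataset("open-llm-leaderboard/details_lgaalves__gpt2-xl_lima_public",
	"harness_winogrande_5",
	split="train")
```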
## Latest results
These are the latest results from run 2023-11-15T03:46:31.104311 (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks. You can find each one in the results and in the "latest" split for each eval):
### Supported Tasks and Leaderboards
### Languages
## Dataset Structure
### Data Instances
### Data Fields
### Data Splits
## Dataset Creation
### Curation Rationale
### Source Data
#### Initial Data Collection and Normalization
#### Who are the source language producers?
### Annotations
#### Annotation process
#### Who are the annotators?
### Personal and Sensitive Information
## Considerations for Using the Data
### Social Impact of Dataset
### Discussion of Biases
### Other Known Limitations
## Additional Information
### Dataset Curators
### Licensing Information
### Contributions
| [
"# Dataset Card for Evaluation run of lgaalves/gpt2-xl_lima",
"## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL",
"### Dataset Summary\n\nDataset automatically created during the evaluation run of model lgaalves/gpt2-xl_lima on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:",
"## Latest results\n\nThese are the latest results from run 2023-11-15T03:46:31.104311(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):",
"### Supported Tasks and Leaderboards",
"### Languages",
"## Dataset Structure",
"### Data Instances",
"### Data Fields",
"### Data Splits",
"## Dataset Creation",
"### Curation Rationale",
"### Source Data",
"#### Initial Data Collection and Normalization",
"#### Who are the source language producers?",
"### Annotations",
"#### Annotation process",
"#### Who are the annotators?",
"### Personal and Sensitive Information",
"## Considerations for Using the Data",
"### Social Impact of Dataset",
"### Discussion of Biases",
"### Other Known Limitations",
"## Additional Information",
"### Dataset Curators",
"### Licensing Information",
"### Contributions"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for Evaluation run of lgaalves/gpt2-xl_lima",
"## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL",
"### Dataset Summary\n\nDataset automatically created during the evaluation run of model lgaalves/gpt2-xl_lima on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:",
"## Latest results\n\nThese are the latest results from run 2023-11-15T03:46:31.104311(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):",
"### Supported Tasks and Leaderboards",
"### Languages",
"## Dataset Structure",
"### Data Instances",
"### Data Fields",
"### Data Splits",
"## Dataset Creation",
"### Curation Rationale",
"### Source Data",
"#### Initial Data Collection and Normalization",
"#### Who are the source language producers?",
"### Annotations",
"#### Annotation process",
"#### Who are the annotators?",
"### Personal and Sensitive Information",
"## Considerations for Using the Data",
"### Social Impact of Dataset",
"### Discussion of Biases",
"### Other Known Limitations",
"## Additional Information",
"### Dataset Curators",
"### Licensing Information",
"### Contributions"
]
| [
6,
20,
31,
169,
67,
10,
4,
6,
6,
5,
5,
5,
7,
4,
10,
10,
5,
5,
9,
8,
8,
7,
8,
7,
5,
6,
6,
5
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of lgaalves/gpt2-xl_lima## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model lgaalves/gpt2-xl_lima on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-11-15T03:46:31.104311(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions"
]
|
43e94bc53126a19c687f713fbd5bc22a14659dfa | # Dataset Card for "mimiciii-hospitalcourse-meta"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | dmacres/mimiciii-hospitalcourse-meta | [
"region:us"
]
| 2023-11-15T03:55:19+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "subject_id", "dtype": "int64"}, {"name": "hadm_id", "dtype": "float64"}, {"name": "target_text", "dtype": "string"}, {"name": "extractive_notes_summ", "dtype": "string"}, {"name": "n_notes", "dtype": "int64"}, {"name": "notes", "list": [{"name": "category", "dtype": "string"}, {"name": "chartdate", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "row_id", "dtype": "int64"}, {"name": "text", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 1518715010, "num_examples": 24993}, {"name": "validation", "num_bytes": 342865059, "num_examples": 5356}, {"name": "test", "num_bytes": 326661857, "num_examples": 5356}], "download_size": 896512070, "dataset_size": 2188241926}} | 2023-11-15T04:07:10+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "mimiciii-hospitalcourse-meta"
More Information needed | [
"# Dataset Card for \"mimiciii-hospitalcourse-meta\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"mimiciii-hospitalcourse-meta\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"mimiciii-hospitalcourse-meta\"\n\nMore Information needed"
]
|
611ac03c37b7a2838f4181b06ae5e474b587629f | # Dataset Card for "nips_data_v7"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | Percent-BFD/nips_data_v7 | [
"region:us"
]
| 2023-11-15T04:08:07+00:00 | {"dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "data_source", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 435578567, "num_examples": 246906}], "download_size": 266845557, "dataset_size": 435578567}} | 2023-11-15T04:09:55+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "nips_data_v7"
More Information needed | [
"# Dataset Card for \"nips_data_v7\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"nips_data_v7\"\n\nMore Information needed"
]
| [
6,
17
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"nips_data_v7\"\n\nMore Information needed"
]
|
54479ae2cdbc9508be89ebe347b7875d63099b4a | # Dataset Card for "dumb_decimal"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | jlbaker361/dumb_decimal | [
"region:us"
]
| 2023-11-15T04:18:47+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "float64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 225.0, "num_examples": 9}, {"name": "test", "num_bytes": 25, "num_examples": 1}], "download_size": 3294, "dataset_size": 250.0}} | 2023-11-17T05:54:01+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "dumb_decimal"
More Information needed | [
"# Dataset Card for \"dumb_decimal\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"dumb_decimal\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"dumb_decimal\"\n\nMore Information needed"
]
|
e423ec039119161b0dac69251f0594ee0f6b1ac6 | # Dataset Card for "viet-pretrained-002"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | vilm/viet-pretrained-002 | [
"region:us"
]
| 2023-11-15T04:22:27+00:00 | {"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4502549932, "num_examples": 258823}], "download_size": 2324002719, "dataset_size": 4502549932}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T04:24:21+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "viet-pretrained-002"
More Information needed | [
"# Dataset Card for \"viet-pretrained-002\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"viet-pretrained-002\"\n\nMore Information needed"
]
| [
6,
17
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"viet-pretrained-002\"\n\nMore Information needed"
]
|
e46a8dd793170011890ee6e8272ffda042f0c611 | # Dataset Card for "oasst"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | TinyPixel/oasst | [
"region:us"
]
| 2023-11-15T04:32:12+00:00 | {"dataset_info": {"features": [{"name": "message_tree_id", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 15836903, "num_examples": 9823}], "download_size": 9334076, "dataset_size": 15836903}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-12-17T12:37:49+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "oasst"
More Information needed | [
"# Dataset Card for \"oasst\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"oasst\"\n\nMore Information needed"
]
| [
6,
13
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"oasst\"\n\nMore Information needed"
]
|
60558f8c8294d50cd9c7f64226fe428ac561bc06 | # Dataset Card for "llama-intent-1615"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | pankajemplay/llama-intent-1615 | [
"region:us"
]
| 2023-11-15T04:41:46+00:00 | {"dataset_info": {"features": [{"name": "User Query", "dtype": "string"}, {"name": "Intent", "dtype": "string"}, {"name": "id type", "dtype": "string"}, {"name": "id value", "dtype": "string"}, {"name": "id slot filled", "dtype": "bool"}, {"name": "Task", "dtype": "string"}, {"name": "task slot filled", "dtype": "bool"}, {"name": "Bot Response", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 759033, "num_examples": 1615}], "download_size": 221927, "dataset_size": 759033}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T04:41:48+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "llama-intent-1615"
More Information needed | [
"# Dataset Card for \"llama-intent-1615\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"llama-intent-1615\"\n\nMore Information needed"
]
| [
6,
17
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"llama-intent-1615\"\n\nMore Information needed"
]
|
4f59df9cb8903367ddeea75e5ad916efe7de746b | # govgis_nov2023
🤖 This README was written by GPT-4. 🤖
`govgis_nov2023` is an extensive compilation of metadata, documenting geospatial data from known government servers as of November 15 2023. This should provide a rich resource for GIS analysis, research, and application development.
These datasets contain data from various Federal, State, County, and City ArcGIS Servers listed by Joseph Elfelt of [Mapping Support](https://mappingsupport.com). It serves as a unique snapshot capturing the state of these servers in November 2023.
This repo contains the [very messy] notebooks with the code used to compile the data and save it in parquet format.
## Overview
- Content: Includes three primary files: servers.parquet, services.parquet, and layers.parquet, offering detailed insights into numerous GIS servers and layers (see the loading sketch after this list).
- Size and Scope: The dataset covers data from 1684 servers, detailing almost a million individual layers with extensive metadata including field information for feature layers, cell size for raster layers, etc.
- Format: Data is stored in Parquet format, facilitating efficient storage and quick access.
- Status: This is a static snapshot and not actively maintained like Joseph Elfelt’s ongoing listings. However, this foundation may evolve into a maintained index.
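For orientation, the three tables load directly with pandas. A minimal sketch, assuming the parquet files have been downloaded locally from this repo and that a parquet engine such as pyarrow is installed:

```python
# Minimal loading sketch; file paths assume the parquet files sit in the
# current working directory after download.
import pandas as pd

servers = pd.read_parquet("servers.parquet")
services = pd.read_parquet("services.parquet")
layers = pd.read_parquet("layers.parquet")

print(f"{len(servers)} servers, {len(services)} services, {len(layers)} layers")
```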
## Data Collection
- Tools & Libraries Used: Data was collected using the [`restgdf`](https://github.com/joshuasundance-swca/restgdf) library, designed for efficient and asynchronous interaction with ArcGIS servers.
- Process: The dataset was created by scraping information from a wide range of ArcGIS servers, focusing on capturing a comprehensive and detailed snapshot as of November 2023 (a generic sketch of the kind of endpoint involved follows this list).
- Verification: While data integrity was a focus, the dataset was not subjected to extensive cleaning, preserving the raw and detailed nature of the information.
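For readers unfamiliar with the source systems: an ArcGIS server publishes its service catalog over a plain REST API, which is the kind of endpoint a scraper like restgdf ultimately queries. A generic sketch of that interaction is below; it is not the actual collection code, and the server URL is a placeholder:

```python
# Generic illustration of walking an ArcGIS REST catalog; not the restgdf
# implementation used to build this dataset.
import requests

BASE = "https://example.com/arcgis/rest/services"  # hypothetical server

catalog = requests.get(BASE, params={"f": "json"}, timeout=30).json()
for folder in catalog.get("folders", []):
    print("folder:", folder)
for svc in catalog.get("services", []):
    # Each entry carries a name and a type such as MapServer or FeatureServer.
    print(svc.get("name"), svc.get("type"))
```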
## Data Processing
- Data Cleaning: Minimal cleaning was conducted to maintain the dataset's comprehensive and raw nature, allowing users to filter and process data as needed.
- Data Transformation: Collected data was standardized and converted into Parquet format for ease of use and accessibility.
## Use Cases
The `govgis_nov2023` dataset can be utilized for:
- Educational and Research Purposes: A valuable resource for GIS students, educators, and researchers.
- Geospatial Data Analysis: Ideal for analysts and data scientists for conducting extensive geospatial analyses.
- GIS Application Development: Useful for developers in building or enhancing GIS-related applications.
- Language Model Integration: The dataset can be used to train or evaluate language models for generating descriptions or summaries of GIS data.
## Conclusion
- Creation: This dataset was created using the restgdf library, emphasizing the potential of open-source contributions in the GIS field.
- Data Source: The dataset comprises data from publicly accessible ArcGIS servers. The dataset creator has no affiliation with Joseph Elfelt, MappingSupport.com, or the servers' respective owners.
| joshuasundance/govgis_nov2023 | [
"size_categories:100K<n<1M",
"language:en",
"license:mit",
"gis",
"geospatial",
"doi:10.57967/hf/1368",
"region:us"
]
| 2023-11-15T04:54:34+00:00 | {"language": ["en"], "license": "mit", "size_categories": ["100K<n<1M"], "tags": ["gis", "geospatial"]} | 2023-11-17T16:16:30+00:00 | []
| [
"en"
]
| TAGS
#size_categories-100K<n<1M #language-English #license-mit #gis #geospatial #doi-10.57967/hf/1368 #region-us
| # govgis_nov2023
This README was written by GPT-4.
'govgis_nov2023' is an extensive compilation of metadata, documenting geospatial data from known government servers as of November 15 2023. This should provide a rich resource for GIS analysis, research, and application development.
These datasets contain data from various Federal, State, County, and City ArcGIS Servers listed by Joseph Elfelt of Mapping Support. It serves as a unique snapshot capturing the state of these servers in November 2023.
This repo contains the [very messy] notebooks with the code used to compile the data and save it in parquet format.
## Overview
- Content: Includes three primary files: servers.parquet, services.parquet, and layers.parquet, offering detailed insights into numerous GIS servers and layers.
- Size and Scope: The dataset covers data from 1684 servers, detailing almost a million individual layers with extensive metadata including field information for feature layers, cell size for raster layers, etc.
- Format: Data is stored in Parquet format, facilitating efficient storage and quick access.
- Status: This is a static snapshot and not actively maintained like Joseph Elfelt’s ongoing listings. However, this foundation may evolve into a maintained index.
## Data Collection
- Tools & Libraries Used: Data was collected using the 'restgdf' library, designed for efficient and asynchronous interaction with ArcGIS servers.
- Process: The dataset was created by scraping information from a wide range of ArcGIS servers, focusing on capturing a comprehensive and detailed snapshot as of November 2023.
- Verification: While data integrity was a focus, the dataset was not subjected to extensive cleaning, preserving the raw and detailed nature of the information.
## Data Processing
- Data Cleaning: Minimal cleaning was conducted to maintain the dataset's comprehensive and raw nature, allowing users to filter and process data as needed.
- Data Transformation: Collected data was standardized and converted into Parquet format for ease of use and accessibility.
## Use Cases
The 'govgis_nov2023' dataset can be utilized for:
- Educational and Research Purposes: A valuable resource for GIS students, educators, and researchers.
- Geospatial Data Analysis: Ideal for analysts and data scientists for conducting extensive geospatial analyses.
- GIS Application Development: Useful for developers in building or enhancing GIS-related applications.
- Language Model Integration: The dataset can be used to train or evaluate language models for generating descriptions or summaries of GIS data.
## Conclusion
- Creation: This dataset was created using the restgdf library, emphasizing the potential of open-source contributions in the GIS field.
- Data Source: The dataset comprises data from publicly accessible ArcGIS servers. The dataset creator has no affiliation with Joseph Elfelt, URL, or the servers' respective owners.
| [
"# govgis_nov2023\n\n This README was written by GPT-4. \n\n'govgis_nov2023' is an extensive compilation of metadata, documenting geospatial data from known government servers as of November 15 2023. This should provide a rich resource for GIS analysis, research, and application development.\n\nThese datasets contain data from various Federal, State, County, and City ArcGIS Servers listed by Joseph Elfelt of Mapping Support. It serves as a unique snapshot capturing the state of these servers in November 2023.\n\nThis repo contains the [very messy] notebooks with the code used to compile the data and save it in parquet format.",
"## Overview\n\n- Content: Includes three primary files: servers.parquet, services.parquet, and layers.parquet, offering detailed insights into numerous GIS servers and layers.\n- Size and Scope: The dataset covers data from 1684 servers, detailing almost a million individual layers with extensive metadata including field information for feature layers, cell size for raster layers, etc.\n- Format: Data is stored in Parquet format, facilitating efficient storage and quick access.\n- Status: This is a static snapshot and not actively maintained like Joseph Elfelt’s ongoing listings. However, this foundation may evolve into a maintained index.",
"## Data Collection\n\n- Tools & Libraries Used: Data was collected using the 'restgdf' library, designed for efficient and asynchronous interaction with ArcGIS servers.\n- Process: The dataset was created by scraping information from a wide range of ArcGIS servers, focusing on capturing a comprehensive and detailed snapshot as of November 2023.\n- Verification: While data integrity was a focus, the dataset was not subjected to extensive cleaning, preserving the raw and detailed nature of the information.",
"## Data Processing\n- Data Cleaning: Minimal cleaning was conducted to maintain the dataset's comprehensive and raw nature, allowing users to filter and process data as needed.\n- Data Transformation: Collected data was standardized and converted into Parquet format for ease of use and accessibility.",
"## Use Cases\nThe 'govgis_nov2023' dataset can be utilized for:\n\n- Educational and Research Purposes: A valuable resource for GIS students, educators, and researchers.\n- Geospatial Data Analysis: Ideal for analysts and data scientists for conducting extensive geospatial analyses.\n- GIS Application Development: Useful for developers in building or enhancing GIS-related applications.\n- Language Model Integration: The dataset can be used to train or evaluate language models for generating descriptions or summaries of GIS data.",
"## Conclusion\n\n- Creation: This dataset was created using the restgdf library, emphasizing the potential of open-source contributions in the GIS field.\n- Data Source: The dataset comprises data from publicly accessible ArcGIS servers. The dataset creator has no affiliation with Joseph Elfelt, URL, or the servers' respective owners."
]
| [
"TAGS\n#size_categories-100K<n<1M #language-English #license-mit #gis #geospatial #doi-10.57967/hf/1368 #region-us \n",
"# govgis_nov2023\n\n This README was written by GPT-4. \n\n'govgis_nov2023' is an extensive compilation of metadata, documenting geospatial data from known government servers as of November 15 2023. This should provide a rich resource for GIS analysis, research, and application development.\n\nThese datasets contain data from various Federal, State, County, and City ArcGIS Servers listed by Joseph Elfelt of Mapping Support. It serves as a unique snapshot capturing the state of these servers in November 2023.\n\nThis repo contains the [very messy] notebooks with the code used to compile the data and save it in parquet format.",
"## Overview\n\n- Content: Includes three primary files: servers.parquet, services.parquet, and layers.parquet, offering detailed insights into numerous GIS servers and layers.\n- Size and Scope: The dataset covers data from 1684 servers, detailing almost a million individual layers with extensive metadata including field information for feature layers, cell size for raster layers, etc.\n- Format: Data is stored in Parquet format, facilitating efficient storage and quick access.\n- Status: This is a static snapshot and not actively maintained like Joseph Elfelt’s ongoing listings. However, this foundation may evolve into a maintained index.",
"## Data Collection\n\n- Tools & Libraries Used: Data was collected using the 'restgdf' library, designed for efficient and asynchronous interaction with ArcGIS servers.\n- Process: The dataset was created by scraping information from a wide range of ArcGIS servers, focusing on capturing a comprehensive and detailed snapshot as of November 2023.\n- Verification: While data integrity was a focus, the dataset was not subjected to extensive cleaning, preserving the raw and detailed nature of the information.",
"## Data Processing\n- Data Cleaning: Minimal cleaning was conducted to maintain the dataset's comprehensive and raw nature, allowing users to filter and process data as needed.\n- Data Transformation: Collected data was standardized and converted into Parquet format for ease of use and accessibility.",
"## Use Cases\nThe 'govgis_nov2023' dataset can be utilized for:\n\n- Educational and Research Purposes: A valuable resource for GIS students, educators, and researchers.\n- Geospatial Data Analysis: Ideal for analysts and data scientists for conducting extensive geospatial analyses.\n- GIS Application Development: Useful for developers in building or enhancing GIS-related applications.\n- Language Model Integration: The dataset can be used to train or evaluate language models for generating descriptions or summaries of GIS data.",
"## Conclusion\n\n- Creation: This dataset was created using the restgdf library, emphasizing the potential of open-source contributions in the GIS field.\n- Data Source: The dataset comprises data from publicly accessible ArcGIS servers. The dataset creator has no affiliation with Joseph Elfelt, URL, or the servers' respective owners."
]
| [
46,
151,
153,
116,
65,
129,
81
]
| [
"passage: TAGS\n#size_categories-100K<n<1M #language-English #license-mit #gis #geospatial #doi-10.57967/hf/1368 #region-us \n# govgis_nov2023\n\n This README was written by GPT-4. \n\n'govgis_nov2023' is an extensive compilation of metadata, documenting geospatial data from known government servers as of November 15 2023. This should provide a rich resource for GIS analysis, research, and application development.\n\nThese datasets contain data from various Federal, State, County, and City ArcGIS Servers listed by Joseph Elfelt of Mapping Support. It serves as a unique snapshot capturing the state of these servers in November 2023.\n\nThis repo contains the [very messy] notebooks with the code used to compile the data and save it in parquet format.## Overview\n\n- Content: Includes three primary files: servers.parquet, services.parquet, and layers.parquet, offering detailed insights into numerous GIS servers and layers.\n- Size and Scope: The dataset covers data from 1684 servers, detailing almost a million individual layers with extensive metadata including field information for feature layers, cell size for raster layers, etc.\n- Format: Data is stored in Parquet format, facilitating efficient storage and quick access.\n- Status: This is a static snapshot and not actively maintained like Joseph Elfelt’s ongoing listings. However, this foundation may evolve into a maintained index.## Data Collection\n\n- Tools & Libraries Used: Data was collected using the 'restgdf' library, designed for efficient and asynchronous interaction with ArcGIS servers.\n- Process: The dataset was created by scraping information from a wide range of ArcGIS servers, focusing on capturing a comprehensive and detailed snapshot as of November 2023.\n- Verification: While data integrity was a focus, the dataset was not subjected to extensive cleaning, preserving the raw and detailed nature of the information."
]
|
fb0b2db1479a9a25b4270229e14024ceaba5a561 |
# AMEBA Blog FAQ Search Dataset
This data was obtained by crawling [this website](https://helps.ameba.jp/faq/).
The FAQ Data was processed to remove HTML tags and other formatting after crawling, and entries containing excessively long content were excluded.
The Query Data was generated using a Large Language Model (LLM). Please refer to the following blog posts for information about the generation process.
- https://www.ai-shift.co.jp/techblog/3710
- https://www.ai-shift.co.jp/techblog/3761
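A minimal loading sketch with pandas is below; the file and column names follow the column description in the next section, and the local paths are an assumption:

```python
# Minimal sketch: load the FAQ corpus and one query split.
# Assumes the CSVs have been downloaded to the working directory.
import pandas as pd

faqs = pd.read_csv("target_faq.csv")              # columns: ID, Title, Content
train_queries = pd.read_csv("queries_train.csv")  # columns: ID, Query, difficulty

# Attach each training query to its gold FAQ via the shared ID.
joined = train_queries.merge(faqs, on="ID", how="left")
print(joined[["Query", "Title"]].head())
```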
## Column description
FAQ Data (target_faq.csv)
- ID: Unique ID of the FAQ
- Title: Title of the FAQ
- Content: Answer content of the FAQ
Query Data (queries_{train/validation/test}.csv)
- ID: Unique ID of the correct FAQ
- Query: Question text
- difficulty: The difficulty level of the problem
- Whether the problem is related to the correct FAQ in the training set.
- If "easy", it is included in the train data, and if "difficult", it is not included in the train data.
- The train data are all "easy". | ai-shift/ameba_faq_search | [
"task_categories:question-answering",
"size_categories:100K<n<1M",
"language:ja",
"license:cc-by-nd-4.0",
"region:us"
]
| 2023-11-15T04:58:19+00:00 | {"language": ["ja"], "license": "cc-by-nd-4.0", "size_categories": ["100K<n<1M"], "task_categories": ["question-answering"]} | 2023-12-26T02:21:58+00:00 | []
| [
"ja"
]
| TAGS
#task_categories-question-answering #size_categories-100K<n<1M #language-Japanese #license-cc-by-nd-4.0 #region-us
|
# AMEBA Blog FAQ Search Dataset
This data was obtained by crawling this website.
The FAQ Data was processed to remove HTML tags and other formatting after crawling, and entries containing excessively long content were excluded.
The Query Data was generated using a Large Language Model (LLM). Please refer to the following blog posts for information about the generation process.
- URL
- URL
## Column description
FAQ Data (target_faq.csv)
- ID: Unique ID of the FAQ
- Title: Title of the FAQ
- Content: Answer content of the FAQ
Query Data (queries_{train/validation/test}.csv)
- ID: Unique ID of the correct FAQ
- Query: Question text
- difficulty: The difficulty level of the problem
- Whether the problem is related to the correct FAQ in the training set.
- If "easy", it is included in the train data, and if "difficult", it is not included in the train data.
- The train data are all "easy". | [
"# AMEBA Blog FAQ Search Dataset\n\n\nThis data was obtained by crawling this website.\n\nThe FAQ Data was processed to remove HTML tags and other formatting after crawling, and entries containing excessively long content were excluded.\n\nThe Query Data was generated using a Large Language Model (LLM). Please refer to the following blog for information about the generation process.\n\n- URL\n- URL",
"## Column description\n\nFAQ Data (target_faq.csv)\n\n- ID: Unique ID of the FAQ\n- Title: Title of the FAQ\n- Content: Answer content of the FAQ\n\nQuery Data (queries_{train/validation/test}.csv)\n\n- ID: Unique ID of the correct FAQ\n- Query: Question text\n- difficulty: The difficulty level of the problem\n - Whether the problem is related to the correct FAQ in the training set.\n - If \"easy\", it is included in the train data, and if \"difficult\", it is not included in the train data.\n - The train data are all \"easy\"."
]
| [
"TAGS\n#task_categories-question-answering #size_categories-100K<n<1M #language-Japanese #license-cc-by-nd-4.0 #region-us \n",
"# AMEBA Blog FAQ Search Dataset\n\n\nThis data was obtained by crawling this website.\n\nThe FAQ Data was processed to remove HTML tags and other formatting after crawling, and entries containing excessively long content were excluded.\n\nThe Query Data was generated using a Large Language Model (LLM). Please refer to the following blog for information about the generation process.\n\n- URL\n- URL",
"## Column description\n\nFAQ Data (target_faq.csv)\n\n- ID: Unique ID of the FAQ\n- Title: Title of the FAQ\n- Content: Answer content of the FAQ\n\nQuery Data (queries_{train/validation/test}.csv)\n\n- ID: Unique ID of the correct FAQ\n- Query: Question text\n- difficulty: The difficulty level of the problem\n - Whether the problem is related to the correct FAQ in the training set.\n - If \"easy\", it is included in the train data, and if \"difficult\", it is not included in the train data.\n - The train data are all \"easy\"."
]
| [
47,
84,
143
]
| [
"passage: TAGS\n#task_categories-question-answering #size_categories-100K<n<1M #language-Japanese #license-cc-by-nd-4.0 #region-us \n# AMEBA Blog FAQ Search Dataset\n\n\nThis data was obtained by crawling this website.\n\nThe FAQ Data was processed to remove HTML tags and other formatting after crawling, and entries containing excessively long content were excluded.\n\nThe Query Data was generated using a Large Language Model (LLM). Please refer to the following blog for information about the generation process.\n\n- URL\n- URL## Column description\n\nFAQ Data (target_faq.csv)\n\n- ID: Unique ID of the FAQ\n- Title: Title of the FAQ\n- Content: Answer content of the FAQ\n\nQuery Data (queries_{train/validation/test}.csv)\n\n- ID: Unique ID of the correct FAQ\n- Query: Question text\n- difficulty: The difficulty level of the problem\n - Whether the problem is related to the correct FAQ in the training set.\n - If \"easy\", it is included in the train data, and if \"difficult\", it is not included in the train data.\n - The train data are all \"easy\"."
]
|
73b41d225fad96d1e88d010259e285641365eab5 | # Dataset Card for "preprocessed_race_for_multiple_choice"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | Liberty-L/preprocessed_race_for_multiple_choice | [
"region:us"
]
| 2023-11-15T05:00:46+00:00 | {"dataset_info": {"features": [{"name": "data_index_by_user", "dtype": "int64"}, {"name": "article", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "options", "sequence": "string"}, {"name": "input_ids", "sequence": {"sequence": "int32"}}, {"name": "token_type_ids", "sequence": {"sequence": "int8"}}, {"name": "attention_mask", "sequence": {"sequence": "int8"}}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 683451159, "num_examples": 62866}], "download_size": 143191809, "dataset_size": 683451159}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T05:05:01+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "preprocessed_race_for_multiple_choice"
More Information needed | [
"# Dataset Card for \"preprocessed_race_for_multiple_choice\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"preprocessed_race_for_multiple_choice\"\n\nMore Information needed"
]
| [
6,
23
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"preprocessed_race_for_multiple_choice\"\n\nMore Information needed"
]
|
10a9bdfded011ddf4da04bfb0fbff2f3638f950c | # Dataset Card for "race_val"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | Liberty-L/race_val | [
"region:us"
]
| 2023-11-15T05:05:30+00:00 | {"dataset_info": {"features": [{"name": "data_index_by_user", "dtype": "int64"}, {"name": "article", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "options", "sequence": "string"}, {"name": "input_ids", "sequence": {"sequence": "int32"}}, {"name": "token_type_ids", "sequence": {"sequence": "int8"}}, {"name": "attention_mask", "sequence": {"sequence": "int8"}}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 38434030, "num_examples": 3547}], "download_size": 8065978, "dataset_size": 38434030}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T05:06:11+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "race_val"
More Information needed | [
"# Dataset Card for \"race_val\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"race_val\"\n\nMore Information needed"
]
| [
6,
13
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"race_val\"\n\nMore Information needed"
]
|
667eac8dbecdd9174b1775e908d66cfe3aabea11 | # Dataset Card for "race_test"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | Liberty-L/race_test | [
"region:us"
]
| 2023-11-15T05:05:52+00:00 | {"dataset_info": {"features": [{"name": "data_index_by_user", "dtype": "int64"}, {"name": "article", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "options", "sequence": "string"}, {"name": "input_ids", "sequence": {"sequence": "int32"}}, {"name": "token_type_ids", "sequence": {"sequence": "int8"}}, {"name": "attention_mask", "sequence": {"sequence": "int8"}}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 39853553, "num_examples": 3638}], "download_size": 8418028, "dataset_size": 39853553}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T05:06:14+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "race_test"
More Information needed | [
"# Dataset Card for \"race_test\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"race_test\"\n\nMore Information needed"
]
| [
6,
13
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"race_test\"\n\nMore Information needed"
]
|
d0bbe735e7092473328196adb677534056997879 |
Inspired by thefcraft/civitai-stable-diffusion-337k.
Collected using the Civitai API to get all prompts. | hanruijiang/civitai-stable-diffusion-2.5m | [
"task_categories:text-generation",
"task_categories:text-to-image",
"size_categories:1M<n<10M",
"language:en",
"license:apache-2.0",
"art",
"region:us"
]
| 2023-11-15T05:54:10+00:00 | {"language": ["en"], "license": "apache-2.0", "size_categories": ["1M<n<10M"], "task_categories": ["text-generation", "text-to-image"], "tags": ["art"]} | 2023-11-15T06:23:47+00:00 | []
| [
"en"
]
| TAGS
#task_categories-text-generation #task_categories-text-to-image #size_categories-1M<n<10M #language-English #license-apache-2.0 #art #region-us
|
Inspired by thefcraft/civitai-stable-diffusion-337k.
Collected using the Civitai API to get all prompts. | []
| [
"TAGS\n#task_categories-text-generation #task_categories-text-to-image #size_categories-1M<n<10M #language-English #license-apache-2.0 #art #region-us \n"
]
| [
55
]
| [
"passage: TAGS\n#task_categories-text-generation #task_categories-text-to-image #size_categories-1M<n<10M #language-English #license-apache-2.0 #art #region-us \n"
]
|
0ebe1938d7f32f15350fe5142571a0ecfae8e223 | good | kkk0001/gd | [
"task_categories:text-classification",
"language:en",
"license:mit",
"music",
"chemistry",
"region:us"
]
| 2023-11-15T05:58:49+00:00 | {"language": ["en"], "license": "mit", "task_categories": ["text-classification"], "tags": ["music", "chemistry"]} | 2023-11-15T06:08:49+00:00 | []
| [
"en"
]
| TAGS
#task_categories-text-classification #language-English #license-mit #music #chemistry #region-us
| good | []
| [
"TAGS\n#task_categories-text-classification #language-English #license-mit #music #chemistry #region-us \n"
]
| [
32
]
| [
"passage: TAGS\n#task_categories-text-classification #language-English #license-mit #music #chemistry #region-us \n"
]
|
ce97424e3e90a611f64079f1abab9546fe2a8af3 | # Dataset Card for "qrcode"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | surabhiMV/qrcode | [
"region:us"
]
| 2023-11-15T06:06:38+00:00 | {"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "image"}, {"name": "bbox", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 18269599.0, "num_examples": 502}], "download_size": 17289588, "dataset_size": 18269599.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T06:06:42+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "qrcode"
More Information needed | [
"# Dataset Card for \"qrcode\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"qrcode\"\n\nMore Information needed"
]
| [
6,
13
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"qrcode\"\n\nMore Information needed"
]
|
a70731ba83ff09f32a3434bb7ce49218ade250d9 | # Dataset Card for "qrcode_val"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | surabhiMV/qrcode_val | [
"region:us"
]
| 2023-11-15T06:08:26+00:00 | {"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "image"}, {"name": "bbox", "sequence": {"sequence": "float64"}}], "splits": [{"name": "train", "num_bytes": 2138545.0, "num_examples": 60}], "download_size": 2038351, "dataset_size": 2138545.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T06:08:29+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "qrcode_val"
More Information needed | [
"# Dataset Card for \"qrcode_val\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"qrcode_val\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"qrcode_val\"\n\nMore Information needed"
]
|
618d1bca5ba1c2a86c166e8fa43775fbfe75ff76 | # DPO formatted Helpful and Harmless RLHF Dataset
This dataset is built from [Anthropic's hh-rlhf dataset](https://huggingface.co/datasets/Anthropic/hh-rlhf).
It is modified as follows:

- The prompt formatting is switched to the Llama 2 format with [INST] and [/INST].
- The data is split into prompt, chosen and rejected rows, as required by HuggingFace's DPO trainer (a conversion sketch follows).
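To make the transformation concrete, here is a sketch of the per-record conversion. The exact preprocessing used for this dataset is not published, so the turn-splitting logic below is an assumption based on the hh-rlhf transcript format:

```python
# Sketch: convert one hh-rlhf record into the prompt/chosen/rejected fields.
# hh-rlhf transcripts interleave "\n\nHuman:" and "\n\nAssistant:" turns; the
# split below assumes only the final assistant turn differs between the
# chosen and rejected transcripts.
def to_dpo(example: dict) -> dict:
    marker = "\n\nAssistant:"
    prompt, chosen = example["chosen"].rsplit(marker, 1)
    _, rejected = example["rejected"].rsplit(marker, 1)
    return {
        "prompt": f"[INST] {prompt.strip()} [/INST]",  # Llama 2 style tags
        "chosen": chosen.strip(),
        "rejected": rejected.strip(),
    }
```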
Purchase access to this dataset here. Purchase entitles the user to make use of the dataset for training large language models. | objects76/Anthropic-hh-rlhf-dpo | [
"license:apache-2.0",
"region:us"
]
| 2023-11-15T06:39:50+00:00 | {"license": "apache-2.0"} | 2023-11-15T06:46:29+00:00 | []
| []
| TAGS
#license-apache-2.0 #region-us
| # DPO formatted Helpful and Harmless RLHF Dataset
This dataset is built from Anthropic's hh-rlhf dataset.
It is modified as follows:
- The prompt formatting is switched to the Llama 2 format with [INST] and [/INST].
- The data is split into prompt, chosen and rejected rows, as required by HuggingFace's DPO trainer.
Purchase access to this dataset here. Purchase entitles the user to make use of the dataset for training large language models. | [
"# DPO formatted Helpful and Harmless RLHF Dataset\nThis dataset is built from Anthropic's hh-rlhf dataset.\n\nIt is modified as follows:\n\nThe prompt formatting is switched to the Llama 2 format with [INST] and [/INST]\nThe data is split into prompt, chosen and rejected rows, as required by HuggingFace's DPO trainer.\nPurchase access to this dataset here. Purchase entitles the user to make use of the dataset for training large language models."
]
| [
"TAGS\n#license-apache-2.0 #region-us \n",
"# DPO formatted Helpful and Harmless RLHF Dataset\nThis dataset is built from Anthropic's hh-rlhf dataset.\n\nIt is modified as follows:\n\nThe prompt formatting is switched to the Llama 2 format with [INST] and [/INST]\nThe data is split into prompt, chosen and rejected rows, as required by HuggingFace's DPO trainer.\nPurchase access to this dataset here. Purchase entitles the user to make use of the dataset for training large language models."
]
| [
14,
129
]
| [
"passage: TAGS\n#license-apache-2.0 #region-us \n# DPO formatted Helpful and Harmless RLHF Dataset\nThis dataset is built from Anthropic's hh-rlhf dataset.\n\nIt is modified as follows:\n\nThe prompt formatting is switched to the Llama 2 format with [INST] and [/INST]\nThe data is split into prompt, chosen and rejected rows, as required by HuggingFace's DPO trainer.\nPurchase access to this dataset here. Purchase entitles the user to make use of the dataset for training large language models."
]
|
68d8e79fa32496049cf0f37273a17e6bb4122b49 | # Dataset Card for "Technology_GarageQA"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | Muthuchancoach/Technology_GarageQA | [
"region:us"
]
| 2023-11-15T06:51:26+00:00 | {"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 39367, "num_examples": 177}], "download_size": 7655, "dataset_size": 39367}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-20T09:30:14+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "Technology_GarageQA"
More Information needed | [
"# Dataset Card for \"Technology_GarageQA\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"Technology_GarageQA\"\n\nMore Information needed"
]
| [
6,
17
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"Technology_GarageQA\"\n\nMore Information needed"
]
|
8acd41294d4cabaa1f97cb0b63d71d93ae0adad9 | # Dataset Card for "java_encoded_processed_APR"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | JoaoJunior/java_encoded_processed_APR | [
"region:us"
]
| 2023-11-15T06:51:56+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "rem", "dtype": "string"}, {"name": "add", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "meta", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 13607569687, "num_examples": 2377685}, {"name": "test", "num_bytes": 3402025571, "num_examples": 594404}], "download_size": 4737737326, "dataset_size": 17009595258}} | 2023-11-15T06:56:15+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "java_encoded_processed_APR"
More Information needed | [
"# Dataset Card for \"java_encoded_processed_APR\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"java_encoded_processed_APR\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"java_encoded_processed_APR\"\n\nMore Information needed"
]
|
50243aca5ca9b85afcf5dad5269b8e4a9d8bbb8b | # Dataset Card for "python_encoded_processed_APR"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | JoaoJunior/python_encoded_processed_APR | [
"region:us"
]
| 2023-11-15T07:08:50+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "rem", "dtype": "string"}, {"name": "add", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "meta", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 1207242215, "num_examples": 350610}, {"name": "test", "num_bytes": 302037040, "num_examples": 87579}], "download_size": 428669552, "dataset_size": 1509279255}} | 2023-11-15T07:09:21+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "python_encoded_processed_APR"
More Information needed | [
"# Dataset Card for \"python_encoded_processed_APR\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"python_encoded_processed_APR\"\n\nMore Information needed"
]
| [
6,
21
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"python_encoded_processed_APR\"\n\nMore Information needed"
]
|
84dff050c99e74720477706156fd4c08d32f5cb5 | # Dataset Card for "sumair_classification"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | sreejith8100/sumair_classification | [
"region:us"
]
| 2023-11-15T07:14:44+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "Blank", "1": "Marriage_Type1", "2": "Marriage_Type2"}}}}, {"name": "ground_truth", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 55518131.0, "num_examples": 90}, {"name": "test", "num_bytes": 27966902.0, "num_examples": 45}], "download_size": 80999629, "dataset_size": 83485033.0}} | 2023-11-15T07:37:24+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "sumair_classification"
More Information needed | [
"# Dataset Card for \"sumair_classification\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"sumair_classification\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"sumair_classification\"\n\nMore Information needed"
]
|
75c86ca0ad8bcfbe7107c7aaba3fda7bcdc0bebd | # Dataset Card for "german-eval"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | seedboxai/eval-german | [
"region:us"
]
| 2023-11-15T07:15:25+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "arc_challenge", "path": "data/arc_challenge-*"}, {"split": "arc_easy", "path": "data/arc_easy-*"}, {"split": "mmlu", "path": "data/mmlu-*"}, {"split": "tqa", "path": "data/tqa-*"}, {"split": "hellaSwag", "path": "data/hellaSwag-*"}]}], "dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "answerKey", "dtype": "string"}, {"name": "source_eval_dataset", "dtype": "string"}, {"name": "prompt_id", "dtype": "string"}, {"name": "eval_prompt", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "arc_challenge", "num_bytes": 221008, "num_examples": 295}, {"name": "arc_easy", "num_bytes": 354359, "num_examples": 567}, {"name": "mmlu", "num_bytes": 298547, "num_examples": 285}, {"name": "tqa", "num_bytes": 511912, "num_examples": 684}, {"name": "hellaSwag", "num_bytes": 1003982, "num_examples": 1000}], "download_size": 1334333, "dataset_size": 2389808}} | 2023-11-15T07:34:51+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "german-eval"
More Information needed | [
"# Dataset Card for \"german-eval\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"german-eval\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"german-eval\"\n\nMore Information needed"
]
|
6102b55f1ad89e897ecad854dacaaf2e4cd77705 | # Dataset Card for "qrcode_n"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | surabhiMV/qrcode_n | [
"region:us"
]
| 2023-11-15T07:16:19+00:00 | {"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "image"}, {"name": "bbox", "sequence": {"sequence": {"sequence": "float64"}}}], "splits": [{"name": "train", "num_bytes": 18271607.0, "num_examples": 502}], "download_size": 17289874, "dataset_size": 18271607.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T07:16:23+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "qrcode_n"
More Information needed | [
"# Dataset Card for \"qrcode_n\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"qrcode_n\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"qrcode_n\"\n\nMore Information needed"
]
|
4e7dc3e44db7619a35a1fc4a6446ac06f550168a | # Dataset Card for "qrcode_val_n"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | surabhiMV/qrcode_val_n | [
"region:us"
]
| 2023-11-15T07:16:30+00:00 | {"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "image"}, {"name": "bbox", "sequence": {"sequence": {"sequence": "float64"}}}], "splits": [{"name": "train", "num_bytes": 2138785.0, "num_examples": 60}], "download_size": 2038537, "dataset_size": 2138785.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T07:16:32+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "qrcode_val_n"
More Information needed | [
"# Dataset Card for \"qrcode_val_n\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"qrcode_val_n\"\n\nMore Information needed"
]
| [
6,
17
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"qrcode_val_n\"\n\nMore Information needed"
]
|
e4e44b67a4690d6de2dd4e92f8e00c4483d3176e |
# LAION COCO with aesthetic score and watermark score
This dataset contains a 10% sample of the [LAION-COCO](https://huggingface.co/datasets/laion/laion-coco) dataset, filtered by text rules (*remove URLs, special tokens*, etc.) and image rules (*image size > 384x384*, *aesthetic score > 4.75*, and *watermark probability < 0.5*). There are 8,563,753 data instances in total, and the corresponding [aesthetic score](https://github.com/christophschuhmann/improved-aesthetic-predictor/tree/main) and [watermark score](https://github.com/LAION-AI/LAION-5B-WatermarkDetection/tree/main) are included for each.
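For reference, the image-level criteria can be written as a single predicate. A sketch under assumed column names (the actual schema of this dataset may differ):

```python
# Sketch of the image-level selection rule described above, applied to one
# metadata row. Field names ("width", "height", "aesthetic_score",
# "watermark_prob") are assumptions, not the confirmed schema.
def keep(row: dict) -> bool:
    return (
        row["width"] > 384
        and row["height"] > 384
        and row["aesthetic_score"] > 4.75
        and row["watermark_prob"] < 0.5  # probability a watermark exists
    )
```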
Note: the watermark score in the table is the probability that a watermark exists in the image. | guangyil/laion-coco-aesthetic | [
"task_categories:image-to-text",
"task_categories:text-to-image",
"size_categories:1M<n<10M",
"language:en",
"license:apache-2.0",
"laion",
"region:us"
]
| 2023-11-15T07:24:06+00:00 | {"language": ["en"], "license": "apache-2.0", "size_categories": ["1M<n<10M"], "task_categories": ["image-to-text", "text-to-image"], "pretty_name": "s", "tags": ["laion"]} | 2023-11-15T10:34:11+00:00 | []
| [
"en"
]
| TAGS
#task_categories-image-to-text #task_categories-text-to-image #size_categories-1M<n<10M #language-English #license-apache-2.0 #laion #region-us
|
# LAION COCO with aesthetic score and watermark score
This dataset contains a 10% sample of the LAION-COCO dataset, filtered by text rules (*remove URLs, special tokens*, etc.) and image rules (*image size > 384x384*, *aesthetic score > 4.75*, and *watermark probability < 0.5*). There are 8,563,753 data instances in total, and the corresponding aesthetic score and watermark score are included for each.
Note: the watermark score in the table is the probability that a watermark exists in the image. | [
"# LAION COCO with aesthetic score and watermark score\n\nThis dataset contains 10% samples of the LAION-COCO dataset filtered by some text rules (*remove url, special tokens*, etc.), and image rules (*image size > 384x384*, *aesthetic score>4.75* and *watermark probability<0.5*). There are total 8,563,753 data instances in this dataset. And the corresponding aesthetic score and watermark score are also included. \n\n\nNoted: watermark score in the table means the probability of the existence of the watermark in the image."
]
| [
"TAGS\n#task_categories-image-to-text #task_categories-text-to-image #size_categories-1M<n<10M #language-English #license-apache-2.0 #laion #region-us \n",
"# LAION COCO with aesthetic score and watermark score\n\nThis dataset contains 10% samples of the LAION-COCO dataset filtered by some text rules (*remove url, special tokens*, etc.), and image rules (*image size > 384x384*, *aesthetic score>4.75* and *watermark probability<0.5*). There are total 8,563,753 data instances in this dataset. And the corresponding aesthetic score and watermark score are also included. \n\n\nNoted: watermark score in the table means the probability of the existence of the watermark in the image."
]
| [
57,
141
]
| [
"passage: TAGS\n#task_categories-image-to-text #task_categories-text-to-image #size_categories-1M<n<10M #language-English #license-apache-2.0 #laion #region-us \n# LAION COCO with aesthetic score and watermark score\n\nThis dataset contains 10% samples of the LAION-COCO dataset filtered by some text rules (*remove url, special tokens*, etc.), and image rules (*image size > 384x384*, *aesthetic score>4.75* and *watermark probability<0.5*). There are total 8,563,753 data instances in this dataset. And the corresponding aesthetic score and watermark score are also included. \n\n\nNoted: watermark score in the table means the probability of the existence of the watermark in the image."
]
|
91bafe15fbed9c401c6004d03f500c44cc2a2b36 | # Dataset Card for "python_java_dataset_APR"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | JoaoJunior/python_java_dataset_APR | [
"region:us"
]
| 2023-11-15T07:25:44+00:00 | {"dataset_info": {"features": [{"name": "rem", "dtype": "string"}, {"name": "add", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "meta", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 14814811902, "num_examples": 2728295}, {"name": "test", "num_bytes": 3704062611, "num_examples": 681983}], "download_size": 5172322839, "dataset_size": 18518874513}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]} | 2023-11-15T07:30:33+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "python_java_dataset_APR"
More Information needed | [
"# Dataset Card for \"python_java_dataset_APR\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"python_java_dataset_APR\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"python_java_dataset_APR\"\n\nMore Information needed"
]
|
5382c5bc6be11aebc6c9b90fceba679e7e1c4b5a | # Dataset Card for "huatuo_encyclopedia_qa_364k"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | xDAN-datasets/huatuo_encyclopedia_qa_364k | [
"region:us"
]
| 2023-11-15T07:28:12+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "conversations", "list": [{"name": "from", "dtype": "string"}, {"name": "value", "dtype": "string"}]}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1196521698, "num_examples": 364420}], "download_size": 0, "dataset_size": 1196521698}} | 2023-11-15T07:28:59+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "huatuo_encyclopedia_qa_364k"
More Information needed | [
"# Dataset Card for \"huatuo_encyclopedia_qa_364k\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"huatuo_encyclopedia_qa_364k\"\n\nMore Information needed"
]
| [
6,
21
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"huatuo_encyclopedia_qa_364k\"\n\nMore Information needed"
]
|
9308c865963f597c5a8a68b8ea3e6190c23796cd | # Dataset Card for "qrcode_new"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | surabhiMV/qrcode_new | [
"region:us"
]
| 2023-11-15T07:33:08+00:00 | {"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 18225795.0, "num_examples": 502}], "download_size": 17273080, "dataset_size": 18225795.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T07:33:13+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "qrcode_new"
More Information needed | [
"# Dataset Card for \"qrcode_new\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"qrcode_new\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"qrcode_new\"\n\nMore Information needed"
]
|
bfc0f722061a76df35b570bc27ca34164c28e2d9 | # Dataset Card for "qrcode_val_new"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | surabhiMV/qrcode_val_new | [
"region:us"
]
| 2023-11-15T07:33:19+00:00 | {"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 2134813.0, "num_examples": 60}], "download_size": 2035796, "dataset_size": 2134813.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T07:33:21+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "qrcode_val_new"
More Information needed | [
"# Dataset Card for \"qrcode_val_new\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"qrcode_val_new\"\n\nMore Information needed"
]
| [
6,
17
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"qrcode_val_new\"\n\nMore Information needed"
]
|
602db98d62ecb0d16d33e37a32cb0aa0f12545b5 | annotations_creators:
- expert-generated
language:
- en
language_creators:
- expert-generated
license:
- other
multilinguality:
- monolingual
pretty_name: Food Nutrition data used to fine-tune LLMs
size_categories:
- 1K<n<10K
source_datasets: []
tags: []
task_categories:
- text-generation
- text2text-generation
task_ids:
- language-modeling
- text-simplification | Roger21/NutritionFineTune_1 | [
"region:us"
]
| 2023-11-15T07:38:57+00:00 | {} | 2023-11-15T08:58:34+00:00 | []
| []
| TAGS
#region-us
| annotations_creators:
- expert-generated
language:
- en
language_creators:
- expert-generated
license:
- other
multilinguality:
- monolingual
pretty_name: Food Nutrition data used to fine-tune LLMs
size_categories:
- 1K<n<10K
source_datasets: []
tags: []
task_categories:
- text-generation
- text2text-generation
task_ids:
- language-modeling
- text-simplification | []
| [
"TAGS\n#region-us \n"
]
| [
6
]
| [
"passage: TAGS\n#region-us \n"
]
|
c84f35b5fd560fd2b41c8b2a13f99171c6227907 | # Dataset Card for "no_robots_FalconChatFormated"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | monsterapi/no_robots_chatformatted_version1 | [
"region:us"
]
| 2023-11-15T07:47:41+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "prompt_id", "dtype": "string"}, {"name": "messages", "list": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}, {"name": "category", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 29092450, "num_examples": 9500}, {"name": "test", "num_bytes": 1560738, "num_examples": 500}], "download_size": 18917122, "dataset_size": 30653188}} | 2023-11-15T07:47:42+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "no_robots_FalconChatFormated"
More Information needed | [
"# Dataset Card for \"no_robots_FalconChatFormated\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"no_robots_FalconChatFormated\"\n\nMore Information needed"
]
| [
6,
21
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"no_robots_FalconChatFormated\"\n\nMore Information needed"
]
|
182f94fb58b596a333127f63cd426fcf15026a0f | # Dataset Card for "no_robots_MistralChatFormated"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | monsterapi/no_robots_chatformatted_version2 | [
"region:us"
]
| 2023-11-15T07:48:00+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "prompt_id", "dtype": "string"}, {"name": "messages", "list": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}, {"name": "category", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 28805395, "num_examples": 9500}, {"name": "test", "num_bytes": 1545168, "num_examples": 500}], "download_size": 18891461, "dataset_size": 30350563}} | 2023-11-15T07:48:02+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "no_robots_MistralChatFormated"
More Information needed | [
"# Dataset Card for \"no_robots_MistralChatFormated\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"no_robots_MistralChatFormated\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"no_robots_MistralChatFormated\"\n\nMore Information needed"
]
|
68ee4729bee115b431bc4ae1e5ffa83a49c11a67 | # Dataset Card for "no_robots_test"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | Zangs3011/no_robots_test | [
"region:us"
]
| 2023-11-15T07:49:11+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "prompt_id", "dtype": "string"}, {"name": "messages", "list": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}, {"name": "category", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 29092450, "num_examples": 9500}, {"name": "test", "num_bytes": 1560738, "num_examples": 500}], "download_size": 18917122, "dataset_size": 30653188}} | 2023-11-15T07:49:36+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "no_robots_test"
More Information needed | [
"# Dataset Card for \"no_robots_test\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"no_robots_test\"\n\nMore Information needed"
]
| [
6,
16
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"no_robots_test\"\n\nMore Information needed"
]
|
a39c16e7336e920fb429f48a4a24636bd725013f | # Dataset Card for "no_robots_gpt2ChatFormated"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | Zangs3011/no_robots_gpt2ChatFormated | [
"region:us"
]
| 2023-11-15T07:49:59+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "prompt_id", "dtype": "string"}, {"name": "messages", "list": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}, {"name": "category", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 29092450, "num_examples": 9500}, {"name": "test", "num_bytes": 1560738, "num_examples": 500}], "download_size": 18917122, "dataset_size": 30653188}} | 2023-11-15T07:50:01+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "no_robots_gpt2ChatFormated"
More Information needed | [
"# Dataset Card for \"no_robots_gpt2ChatFormated\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"no_robots_gpt2ChatFormated\"\n\nMore Information needed"
]
| [
6,
21
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"no_robots_gpt2ChatFormated\"\n\nMore Information needed"
]
|
61aac536482e050bbfb33528b33bfe5056cc73c6 | # Dataset Card for "stt_data_2311152"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | sunglyul/stt_data_2311152 | [
"region:us"
]
| 2023-11-15T08:00:59+00:00 | {"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "transcript", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 76509.5, "num_examples": 6}, {"name": "test", "num_bytes": 18911.25, "num_examples": 1}, {"name": "valid", "num_bytes": 8272.25, "num_examples": 1}], "download_size": 91071, "dataset_size": 103693.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}, {"split": "valid", "path": "data/valid-*"}]}]} | 2023-11-17T08:46:24+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "stt_data_2311152"
More Information needed | [
"# Dataset Card for \"stt_data_2311152\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"stt_data_2311152\"\n\nMore Information needed"
]
| [
6,
18
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"stt_data_2311152\"\n\nMore Information needed"
]
|
2c5bb35b8dab116277b185a633ed350439d0860d | # Dataset Card for "qrcode_new_train"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | surabhiMV/qrcode_new_train | [
"region:us"
]
| 2023-11-15T08:08:34+00:00 | {"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 13629030.0, "num_examples": 352}], "download_size": 12896919, "dataset_size": 13629030.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T08:08:38+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "qrcode_new_train"
More Information needed | [
"# Dataset Card for \"qrcode_new_train\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"qrcode_new_train\"\n\nMore Information needed"
]
| [
6,
18
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"qrcode_new_train\"\n\nMore Information needed"
]
|
1953244ef1bbcf3a02dd6e9e2eafa4afdb29f721 | # Dataset Card for "qrcode_val_new_train"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | surabhiMV/qrcode_val_new_train | [
"region:us"
]
| 2023-11-15T08:08:47+00:00 | {"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 1623749.0, "num_examples": 41}], "download_size": 1563056, "dataset_size": 1623749.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T08:08:49+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "qrcode_val_new_train"
More Information needed | [
"# Dataset Card for \"qrcode_val_new_train\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"qrcode_val_new_train\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"qrcode_val_new_train\"\n\nMore Information needed"
]
|
424e34fba86db61badbb387e24c87920ea4c93ba | # Dataset Card for "dolly-15k-oai-style"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | philschmid/dolly-15k-oai-style | [
"region:us"
]
| 2023-11-15T08:09:58+00:00 | {"dataset_info": {"features": [{"name": "messages", "list": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 12278400, "num_examples": 15011}], "download_size": 7243728, "dataset_size": 12278400}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T08:14:46+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "dolly-15k-oai-style"
More Information needed | [
"# Dataset Card for \"dolly-15k-oai-style\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"dolly-15k-oai-style\"\n\nMore Information needed"
]
| [
6,
19
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"dolly-15k-oai-style\"\n\nMore Information needed"
]
|
e445adfd87cd369bd6fc8830dfda48db3386840d | # Dataset Card for "glaive_code_assistant_140K"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | xDAN-datasets/glaive_code_assistant_140K | [
"region:us"
]
| 2023-11-15T08:11:00+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "conversations", "list": [{"name": "from", "dtype": "string"}, {"name": "value", "dtype": "string"}]}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 417459108, "num_examples": 136109}], "download_size": 0, "dataset_size": 417459108}} | 2023-11-15T08:11:26+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "glaive_code_assistant_140K"
More Information needed | [
"# Dataset Card for \"glaive_code_assistant_140K\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"glaive_code_assistant_140K\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"glaive_code_assistant_140K\"\n\nMore Information needed"
]
|
b7435085fa0108b20b7b5f08307cd8ef7eb11f36 |
# Dataset Card for Evaluation run of Undi95/Llama2-13B-no_robots-alpaca-lora
## Dataset Description
- **Homepage:**
- **Repository:** https://huggingface.co/Undi95/Llama2-13B-no_robots-alpaca-lora
- **Paper:**
- **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard
- **Point of Contact:** [email protected]
### Dataset Summary
Dataset automatically created during the evaluation run of model [Undi95/Llama2-13B-no_robots-alpaca-lora](https://huggingface.co/Undi95/Llama2-13B-no_robots-alpaca-lora) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).
The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks.
The dataset has been created from 1 run. Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.
An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).
To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset("open-llm-leaderboard/details_Undi95__Llama2-13B-no_robots-alpaca-lora_public",
"harness_winogrande_5",
split="train")
```
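To explore the run structure described above, the helper functions in the `datasets` library can enumerate the per-task configurations and the timestamped splits (a minimal sketch; the exact configuration and split names depend on the current state of the repository):
```python
from datasets import get_dataset_config_names, get_dataset_split_names

repo = "open-llm-leaderboard/details_Undi95__Llama2-13B-no_robots-alpaca-lora_public"

# One configuration per evaluated task, plus the aggregated "results" configuration.
configs = get_dataset_config_names(repo)
print(len(configs), "configurations")

# Each run adds a split named after its timestamp; "latest" always points to the newest run.
print(get_dataset_split_names(repo, "harness_winogrande_5"))
```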
## Latest results
These are the [latest results from run 2023-11-15T08:15:04.836039](https://huggingface.co/datasets/open-llm-leaderboard/details_Undi95__Llama2-13B-no_robots-alpaca-lora_public/blob/main/results_2023-11-15T08-15-04.836039.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each one in the results and the "latest" split for each eval):
```python
{
"all": {
"acc": 0.5288556443369928,
"acc_stderr": 0.03390383953418472,
"acc_norm": 0.5370018287535696,
"acc_norm_stderr": 0.034712721572579625,
"mc1": 0.28151774785801714,
"mc1_stderr": 0.01574402724825605,
"mc2": 0.4045559753787184,
"mc2_stderr": 0.01423646056016957,
"em": 0.031774328859060404,
"em_stderr": 0.0017962473521312278,
"f1": 0.09261220637583845,
"f1_stderr": 0.0021550523797604715
},
"harness|arc:challenge|25": {
"acc": 0.5418088737201365,
"acc_stderr": 0.014560220308714695,
"acc_norm": 0.5887372013651877,
"acc_norm_stderr": 0.014379441068522082
},
"harness|hellaswag|10": {
"acc": 0.6309500099581756,
"acc_stderr": 0.004815613144385403,
"acc_norm": 0.8243377813184625,
"acc_norm_stderr": 0.003797548252851636
},
"harness|hendrycksTest-abstract_algebra|5": {
"acc": 0.34,
"acc_stderr": 0.047609522856952365,
"acc_norm": 0.34,
"acc_norm_stderr": 0.047609522856952365
},
"harness|hendrycksTest-anatomy|5": {
"acc": 0.4888888888888889,
"acc_stderr": 0.04318275491977976,
"acc_norm": 0.4888888888888889,
"acc_norm_stderr": 0.04318275491977976
},
"harness|hendrycksTest-astronomy|5": {
"acc": 0.5328947368421053,
"acc_stderr": 0.040601270352363966,
"acc_norm": 0.5328947368421053,
"acc_norm_stderr": 0.040601270352363966
},
"harness|hendrycksTest-business_ethics|5": {
"acc": 0.53,
"acc_stderr": 0.05016135580465919,
"acc_norm": 0.53,
"acc_norm_stderr": 0.05016135580465919
},
"harness|hendrycksTest-clinical_knowledge|5": {
"acc": 0.5660377358490566,
"acc_stderr": 0.030503292013342592,
"acc_norm": 0.5660377358490566,
"acc_norm_stderr": 0.030503292013342592
},
"harness|hendrycksTest-college_biology|5": {
"acc": 0.5625,
"acc_stderr": 0.04148415739394154,
"acc_norm": 0.5625,
"acc_norm_stderr": 0.04148415739394154
},
"harness|hendrycksTest-college_chemistry|5": {
"acc": 0.39,
"acc_stderr": 0.04902071300001975,
"acc_norm": 0.39,
"acc_norm_stderr": 0.04902071300001975
},
"harness|hendrycksTest-college_computer_science|5": {
"acc": 0.44,
"acc_stderr": 0.04988876515698589,
"acc_norm": 0.44,
"acc_norm_stderr": 0.04988876515698589
},
"harness|hendrycksTest-college_mathematics|5": {
"acc": 0.28,
"acc_stderr": 0.04512608598542129,
"acc_norm": 0.28,
"acc_norm_stderr": 0.04512608598542129
},
"harness|hendrycksTest-college_medicine|5": {
"acc": 0.49710982658959535,
"acc_stderr": 0.038124005659748335,
"acc_norm": 0.49710982658959535,
"acc_norm_stderr": 0.038124005659748335
},
"harness|hendrycksTest-college_physics|5": {
"acc": 0.23529411764705882,
"acc_stderr": 0.04220773659171452,
"acc_norm": 0.23529411764705882,
"acc_norm_stderr": 0.04220773659171452
},
"harness|hendrycksTest-computer_security|5": {
"acc": 0.66,
"acc_stderr": 0.04760952285695237,
"acc_norm": 0.66,
"acc_norm_stderr": 0.04760952285695237
},
"harness|hendrycksTest-conceptual_physics|5": {
"acc": 0.4127659574468085,
"acc_stderr": 0.03218471141400351,
"acc_norm": 0.4127659574468085,
"acc_norm_stderr": 0.03218471141400351
},
"harness|hendrycksTest-econometrics|5": {
"acc": 0.32456140350877194,
"acc_stderr": 0.04404556157374768,
"acc_norm": 0.32456140350877194,
"acc_norm_stderr": 0.04404556157374768
},
"harness|hendrycksTest-electrical_engineering|5": {
"acc": 0.5379310344827586,
"acc_stderr": 0.04154659671707548,
"acc_norm": 0.5379310344827586,
"acc_norm_stderr": 0.04154659671707548
},
"harness|hendrycksTest-elementary_mathematics|5": {
"acc": 0.3492063492063492,
"acc_stderr": 0.024552292209342654,
"acc_norm": 0.3492063492063492,
"acc_norm_stderr": 0.024552292209342654
},
"harness|hendrycksTest-formal_logic|5": {
"acc": 0.2619047619047619,
"acc_stderr": 0.039325376803928704,
"acc_norm": 0.2619047619047619,
"acc_norm_stderr": 0.039325376803928704
},
"harness|hendrycksTest-global_facts|5": {
"acc": 0.36,
"acc_stderr": 0.04824181513244218,
"acc_norm": 0.36,
"acc_norm_stderr": 0.04824181513244218
},
"harness|hendrycksTest-high_school_biology|5": {
"acc": 0.6258064516129033,
"acc_stderr": 0.027528904299845704,
"acc_norm": 0.6258064516129033,
"acc_norm_stderr": 0.027528904299845704
},
"harness|hendrycksTest-high_school_chemistry|5": {
"acc": 0.4630541871921182,
"acc_stderr": 0.035083705204426656,
"acc_norm": 0.4630541871921182,
"acc_norm_stderr": 0.035083705204426656
},
"harness|hendrycksTest-high_school_computer_science|5": {
"acc": 0.5,
"acc_stderr": 0.050251890762960605,
"acc_norm": 0.5,
"acc_norm_stderr": 0.050251890762960605
},
"harness|hendrycksTest-high_school_european_history|5": {
"acc": 0.6242424242424243,
"acc_stderr": 0.037818873532059816,
"acc_norm": 0.6242424242424243,
"acc_norm_stderr": 0.037818873532059816
},
"harness|hendrycksTest-high_school_geography|5": {
"acc": 0.6464646464646465,
"acc_stderr": 0.03406086723547155,
"acc_norm": 0.6464646464646465,
"acc_norm_stderr": 0.03406086723547155
},
"harness|hendrycksTest-high_school_government_and_politics|5": {
"acc": 0.7564766839378239,
"acc_stderr": 0.030975436386845454,
"acc_norm": 0.7564766839378239,
"acc_norm_stderr": 0.030975436386845454
},
"harness|hendrycksTest-high_school_macroeconomics|5": {
"acc": 0.5205128205128206,
"acc_stderr": 0.02532966316348994,
"acc_norm": 0.5205128205128206,
"acc_norm_stderr": 0.02532966316348994
},
"harness|hendrycksTest-high_school_mathematics|5": {
"acc": 0.24444444444444444,
"acc_stderr": 0.02620276653465215,
"acc_norm": 0.24444444444444444,
"acc_norm_stderr": 0.02620276653465215
},
"harness|hendrycksTest-high_school_microeconomics|5": {
"acc": 0.5588235294117647,
"acc_stderr": 0.032252942323996406,
"acc_norm": 0.5588235294117647,
"acc_norm_stderr": 0.032252942323996406
},
"harness|hendrycksTest-high_school_physics|5": {
"acc": 0.31788079470198677,
"acc_stderr": 0.03802039760107903,
"acc_norm": 0.31788079470198677,
"acc_norm_stderr": 0.03802039760107903
},
"harness|hendrycksTest-high_school_psychology|5": {
"acc": 0.6844036697247706,
"acc_stderr": 0.019926117513869666,
"acc_norm": 0.6844036697247706,
"acc_norm_stderr": 0.019926117513869666
},
"harness|hendrycksTest-high_school_statistics|5": {
"acc": 0.4212962962962963,
"acc_stderr": 0.03367462138896079,
"acc_norm": 0.4212962962962963,
"acc_norm_stderr": 0.03367462138896079
},
"harness|hendrycksTest-high_school_us_history|5": {
"acc": 0.7205882352941176,
"acc_stderr": 0.031493281045079556,
"acc_norm": 0.7205882352941176,
"acc_norm_stderr": 0.031493281045079556
},
"harness|hendrycksTest-high_school_world_history|5": {
"acc": 0.7383966244725738,
"acc_stderr": 0.028609516716994934,
"acc_norm": 0.7383966244725738,
"acc_norm_stderr": 0.028609516716994934
},
"harness|hendrycksTest-human_aging|5": {
"acc": 0.6502242152466368,
"acc_stderr": 0.03200736719484503,
"acc_norm": 0.6502242152466368,
"acc_norm_stderr": 0.03200736719484503
},
"harness|hendrycksTest-human_sexuality|5": {
"acc": 0.6106870229007634,
"acc_stderr": 0.04276486542814591,
"acc_norm": 0.6106870229007634,
"acc_norm_stderr": 0.04276486542814591
},
"harness|hendrycksTest-international_law|5": {
"acc": 0.71900826446281,
"acc_stderr": 0.041032038305145124,
"acc_norm": 0.71900826446281,
"acc_norm_stderr": 0.041032038305145124
},
"harness|hendrycksTest-jurisprudence|5": {
"acc": 0.6111111111111112,
"acc_stderr": 0.0471282125742677,
"acc_norm": 0.6111111111111112,
"acc_norm_stderr": 0.0471282125742677
},
"harness|hendrycksTest-logical_fallacies|5": {
"acc": 0.6625766871165644,
"acc_stderr": 0.03714908409935574,
"acc_norm": 0.6625766871165644,
"acc_norm_stderr": 0.03714908409935574
},
"harness|hendrycksTest-machine_learning|5": {
"acc": 0.2857142857142857,
"acc_stderr": 0.042878587513404544,
"acc_norm": 0.2857142857142857,
"acc_norm_stderr": 0.042878587513404544
},
"harness|hendrycksTest-management|5": {
"acc": 0.7281553398058253,
"acc_stderr": 0.044052680241409216,
"acc_norm": 0.7281553398058253,
"acc_norm_stderr": 0.044052680241409216
},
"harness|hendrycksTest-marketing|5": {
"acc": 0.7350427350427351,
"acc_stderr": 0.028911208802749486,
"acc_norm": 0.7350427350427351,
"acc_norm_stderr": 0.028911208802749486
},
"harness|hendrycksTest-medical_genetics|5": {
"acc": 0.62,
"acc_stderr": 0.048783173121456316,
"acc_norm": 0.62,
"acc_norm_stderr": 0.048783173121456316
},
"harness|hendrycksTest-miscellaneous|5": {
"acc": 0.7266922094508301,
"acc_stderr": 0.015936681062628556,
"acc_norm": 0.7266922094508301,
"acc_norm_stderr": 0.015936681062628556
},
"harness|hendrycksTest-moral_disputes|5": {
"acc": 0.615606936416185,
"acc_stderr": 0.026189666966272035,
"acc_norm": 0.615606936416185,
"acc_norm_stderr": 0.026189666966272035
},
"harness|hendrycksTest-moral_scenarios|5": {
"acc": 0.26927374301675977,
"acc_stderr": 0.014835616582882611,
"acc_norm": 0.26927374301675977,
"acc_norm_stderr": 0.014835616582882611
},
"harness|hendrycksTest-nutrition|5": {
"acc": 0.5555555555555556,
"acc_stderr": 0.02845263998508801,
"acc_norm": 0.5555555555555556,
"acc_norm_stderr": 0.02845263998508801
},
"harness|hendrycksTest-philosophy|5": {
"acc": 0.6141479099678456,
"acc_stderr": 0.027648149599751468,
"acc_norm": 0.6141479099678456,
"acc_norm_stderr": 0.027648149599751468
},
"harness|hendrycksTest-prehistory|5": {
"acc": 0.6172839506172839,
"acc_stderr": 0.02704453813840261,
"acc_norm": 0.6172839506172839,
"acc_norm_stderr": 0.02704453813840261
},
"harness|hendrycksTest-professional_accounting|5": {
"acc": 0.41843971631205673,
"acc_stderr": 0.02942799403941999,
"acc_norm": 0.41843971631205673,
"acc_norm_stderr": 0.02942799403941999
},
"harness|hendrycksTest-professional_law|5": {
"acc": 0.41134289439374183,
"acc_stderr": 0.012567882673803685,
"acc_norm": 0.41134289439374183,
"acc_norm_stderr": 0.012567882673803685
},
"harness|hendrycksTest-professional_medicine|5": {
"acc": 0.4338235294117647,
"acc_stderr": 0.030105636570016633,
"acc_norm": 0.4338235294117647,
"acc_norm_stderr": 0.030105636570016633
},
"harness|hendrycksTest-professional_psychology|5": {
"acc": 0.5294117647058824,
"acc_stderr": 0.02019280827143379,
"acc_norm": 0.5294117647058824,
"acc_norm_stderr": 0.02019280827143379
},
"harness|hendrycksTest-public_relations|5": {
"acc": 0.5363636363636364,
"acc_stderr": 0.04776449162396197,
"acc_norm": 0.5363636363636364,
"acc_norm_stderr": 0.04776449162396197
},
"harness|hendrycksTest-security_studies|5": {
"acc": 0.6163265306122448,
"acc_stderr": 0.031130880396235933,
"acc_norm": 0.6163265306122448,
"acc_norm_stderr": 0.031130880396235933
},
"harness|hendrycksTest-sociology|5": {
"acc": 0.7412935323383084,
"acc_stderr": 0.030965903123573033,
"acc_norm": 0.7412935323383084,
"acc_norm_stderr": 0.030965903123573033
},
"harness|hendrycksTest-us_foreign_policy|5": {
"acc": 0.74,
"acc_stderr": 0.0440844002276808,
"acc_norm": 0.74,
"acc_norm_stderr": 0.0440844002276808
},
"harness|hendrycksTest-virology|5": {
"acc": 0.4819277108433735,
"acc_stderr": 0.038899512528272166,
"acc_norm": 0.4819277108433735,
"acc_norm_stderr": 0.038899512528272166
},
"harness|hendrycksTest-world_religions|5": {
"acc": 0.7485380116959064,
"acc_stderr": 0.033275044238468436,
"acc_norm": 0.7485380116959064,
"acc_norm_stderr": 0.033275044238468436
},
"harness|truthfulqa:mc|0": {
"mc1": 0.28151774785801714,
"mc1_stderr": 0.01574402724825605,
"mc2": 0.4045559753787184,
"mc2_stderr": 0.01423646056016957
},
"harness|winogrande|5": {
"acc": 0.7529597474348856,
"acc_stderr": 0.012121402942855575
},
"harness|drop|3": {
"em": 0.031774328859060404,
"em_stderr": 0.0017962473521312278,
"f1": 0.09261220637583845,
"f1_stderr": 0.0021550523797604715
},
"harness|gsm8k|5": {
"acc": 0.06444275966641395,
"acc_stderr": 0.006763391728488274
}
}
```
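To work with these aggregated numbers programmatically, one option is to download the results file linked above with `huggingface_hub` and read it as plain JSON (a sketch that assumes the file's top level matches the dictionary shown here; the filename is the timestamped one from this run):
```python
import json

from huggingface_hub import hf_hub_download

# The details repository is a dataset repo, hence repo_type="dataset".
path = hf_hub_download(
    repo_id="open-llm-leaderboard/details_Undi95__Llama2-13B-no_robots-alpaca-lora_public",
    filename="results_2023-11-15T08-15-04.836039.json",
    repo_type="dataset",
)
with open(path) as f:
    results = json.load(f)

# The "all" entry aggregates the per-task metrics reported above.
print(results["all"]["acc"], results["all"]["acc_norm"])
```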
### Supported Tasks and Leaderboards
[More Information Needed]
### Languages
[More Information Needed]
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
[More Information Needed]
### Contributions
[More Information Needed] | open-llm-leaderboard/details_Undi95__Llama2-13B-no_robots-alpaca-lora | [
"region:us"
]
| 2023-11-15T08:18:10+00:00 | {"pretty_name": "Evaluation run of Undi95/Llama2-13B-no_robots-alpaca-lora", "dataset_summary": "Dataset automatically created during the evaluation run of model [Undi95/Llama2-13B-no_robots-alpaca-lora](https://huggingface.co/Undi95/Llama2-13B-no_robots-alpaca-lora) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_Undi95__Llama2-13B-no_robots-alpaca-lora_public\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-11-15T08:15:04.836039](https://huggingface.co/datasets/open-llm-leaderboard/details_Undi95__Llama2-13B-no_robots-alpaca-lora_public/blob/main/results_2023-11-15T08-15-04.836039.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.5288556443369928,\n \"acc_stderr\": 0.03390383953418472,\n \"acc_norm\": 0.5370018287535696,\n \"acc_norm_stderr\": 0.034712721572579625,\n \"mc1\": 0.28151774785801714,\n \"mc1_stderr\": 0.01574402724825605,\n \"mc2\": 0.4045559753787184,\n \"mc2_stderr\": 0.01423646056016957,\n \"em\": 0.031774328859060404,\n \"em_stderr\": 0.0017962473521312278,\n \"f1\": 0.09261220637583845,\n \"f1_stderr\": 0.0021550523797604715\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.5418088737201365,\n \"acc_stderr\": 0.014560220308714695,\n \"acc_norm\": 0.5887372013651877,\n \"acc_norm_stderr\": 0.014379441068522082\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.6309500099581756,\n \"acc_stderr\": 0.004815613144385403,\n \"acc_norm\": 0.8243377813184625,\n \"acc_norm_stderr\": 0.003797548252851636\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.34,\n \"acc_stderr\": 0.047609522856952365,\n \"acc_norm\": 0.34,\n \"acc_norm_stderr\": 0.047609522856952365\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.4888888888888889,\n \"acc_stderr\": 0.04318275491977976,\n \"acc_norm\": 0.4888888888888889,\n \"acc_norm_stderr\": 0.04318275491977976\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.5328947368421053,\n \"acc_stderr\": 0.040601270352363966,\n \"acc_norm\": 0.5328947368421053,\n \"acc_norm_stderr\": 0.040601270352363966\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.53,\n \"acc_stderr\": 0.05016135580465919,\n \"acc_norm\": 0.53,\n \"acc_norm_stderr\": 0.05016135580465919\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.5660377358490566,\n \"acc_stderr\": 0.030503292013342592,\n \"acc_norm\": 0.5660377358490566,\n \"acc_norm_stderr\": 0.030503292013342592\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.5625,\n 
\"acc_stderr\": 0.04148415739394154,\n \"acc_norm\": 0.5625,\n \"acc_norm_stderr\": 0.04148415739394154\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.39,\n \"acc_stderr\": 0.04902071300001975,\n \"acc_norm\": 0.39,\n \"acc_norm_stderr\": 0.04902071300001975\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.44,\n \"acc_stderr\": 0.04988876515698589,\n \"acc_norm\": 0.44,\n \"acc_norm_stderr\": 0.04988876515698589\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.28,\n \"acc_stderr\": 0.04512608598542129,\n \"acc_norm\": 0.28,\n \"acc_norm_stderr\": 0.04512608598542129\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.49710982658959535,\n \"acc_stderr\": 0.038124005659748335,\n \"acc_norm\": 0.49710982658959535,\n \"acc_norm_stderr\": 0.038124005659748335\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.23529411764705882,\n \"acc_stderr\": 0.04220773659171452,\n \"acc_norm\": 0.23529411764705882,\n \"acc_norm_stderr\": 0.04220773659171452\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.66,\n \"acc_stderr\": 0.04760952285695237,\n \"acc_norm\": 0.66,\n \"acc_norm_stderr\": 0.04760952285695237\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.4127659574468085,\n \"acc_stderr\": 0.03218471141400351,\n \"acc_norm\": 0.4127659574468085,\n \"acc_norm_stderr\": 0.03218471141400351\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.32456140350877194,\n \"acc_stderr\": 0.04404556157374768,\n \"acc_norm\": 0.32456140350877194,\n \"acc_norm_stderr\": 0.04404556157374768\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.5379310344827586,\n \"acc_stderr\": 0.04154659671707548,\n \"acc_norm\": 0.5379310344827586,\n \"acc_norm_stderr\": 0.04154659671707548\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.3492063492063492,\n \"acc_stderr\": 0.024552292209342654,\n \"acc_norm\": 0.3492063492063492,\n \"acc_norm_stderr\": 0.024552292209342654\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.2619047619047619,\n \"acc_stderr\": 0.039325376803928704,\n \"acc_norm\": 0.2619047619047619,\n \"acc_norm_stderr\": 0.039325376803928704\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.36,\n \"acc_stderr\": 0.04824181513244218,\n \"acc_norm\": 0.36,\n \"acc_norm_stderr\": 0.04824181513244218\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.6258064516129033,\n \"acc_stderr\": 0.027528904299845704,\n \"acc_norm\": 0.6258064516129033,\n \"acc_norm_stderr\": 0.027528904299845704\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.4630541871921182,\n \"acc_stderr\": 0.035083705204426656,\n \"acc_norm\": 0.4630541871921182,\n \"acc_norm_stderr\": 0.035083705204426656\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.5,\n \"acc_stderr\": 0.050251890762960605,\n \"acc_norm\": 0.5,\n \"acc_norm_stderr\": 0.050251890762960605\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.6242424242424243,\n \"acc_stderr\": 0.037818873532059816,\n \"acc_norm\": 0.6242424242424243,\n \"acc_norm_stderr\": 0.037818873532059816\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.6464646464646465,\n \"acc_stderr\": 0.03406086723547155,\n \"acc_norm\": 0.6464646464646465,\n \"acc_norm_stderr\": 0.03406086723547155\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n 
\"acc\": 0.7564766839378239,\n \"acc_stderr\": 0.030975436386845454,\n \"acc_norm\": 0.7564766839378239,\n \"acc_norm_stderr\": 0.030975436386845454\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.5205128205128206,\n \"acc_stderr\": 0.02532966316348994,\n \"acc_norm\": 0.5205128205128206,\n \"acc_norm_stderr\": 0.02532966316348994\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.24444444444444444,\n \"acc_stderr\": 0.02620276653465215,\n \"acc_norm\": 0.24444444444444444,\n \"acc_norm_stderr\": 0.02620276653465215\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.5588235294117647,\n \"acc_stderr\": 0.032252942323996406,\n \"acc_norm\": 0.5588235294117647,\n \"acc_norm_stderr\": 0.032252942323996406\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.31788079470198677,\n \"acc_stderr\": 0.03802039760107903,\n \"acc_norm\": 0.31788079470198677,\n \"acc_norm_stderr\": 0.03802039760107903\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.6844036697247706,\n \"acc_stderr\": 0.019926117513869666,\n \"acc_norm\": 0.6844036697247706,\n \"acc_norm_stderr\": 0.019926117513869666\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.4212962962962963,\n \"acc_stderr\": 0.03367462138896079,\n \"acc_norm\": 0.4212962962962963,\n \"acc_norm_stderr\": 0.03367462138896079\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.7205882352941176,\n \"acc_stderr\": 0.031493281045079556,\n \"acc_norm\": 0.7205882352941176,\n \"acc_norm_stderr\": 0.031493281045079556\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.7383966244725738,\n \"acc_stderr\": 0.028609516716994934,\n \"acc_norm\": 0.7383966244725738,\n \"acc_norm_stderr\": 0.028609516716994934\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.6502242152466368,\n \"acc_stderr\": 0.03200736719484503,\n \"acc_norm\": 0.6502242152466368,\n \"acc_norm_stderr\": 0.03200736719484503\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.6106870229007634,\n \"acc_stderr\": 0.04276486542814591,\n \"acc_norm\": 0.6106870229007634,\n \"acc_norm_stderr\": 0.04276486542814591\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.71900826446281,\n \"acc_stderr\": 0.041032038305145124,\n \"acc_norm\": 0.71900826446281,\n \"acc_norm_stderr\": 0.041032038305145124\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.6111111111111112,\n \"acc_stderr\": 0.0471282125742677,\n \"acc_norm\": 0.6111111111111112,\n \"acc_norm_stderr\": 0.0471282125742677\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.6625766871165644,\n \"acc_stderr\": 0.03714908409935574,\n \"acc_norm\": 0.6625766871165644,\n \"acc_norm_stderr\": 0.03714908409935574\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.2857142857142857,\n \"acc_stderr\": 0.042878587513404544,\n \"acc_norm\": 0.2857142857142857,\n \"acc_norm_stderr\": 0.042878587513404544\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.7281553398058253,\n \"acc_stderr\": 0.044052680241409216,\n \"acc_norm\": 0.7281553398058253,\n \"acc_norm_stderr\": 0.044052680241409216\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.7350427350427351,\n \"acc_stderr\": 0.028911208802749486,\n \"acc_norm\": 0.7350427350427351,\n \"acc_norm_stderr\": 0.028911208802749486\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.62,\n \"acc_stderr\": 
0.048783173121456316,\n \"acc_norm\": 0.62,\n \"acc_norm_stderr\": 0.048783173121456316\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.7266922094508301,\n \"acc_stderr\": 0.015936681062628556,\n \"acc_norm\": 0.7266922094508301,\n \"acc_norm_stderr\": 0.015936681062628556\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.615606936416185,\n \"acc_stderr\": 0.026189666966272035,\n \"acc_norm\": 0.615606936416185,\n \"acc_norm_stderr\": 0.026189666966272035\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.26927374301675977,\n \"acc_stderr\": 0.014835616582882611,\n \"acc_norm\": 0.26927374301675977,\n \"acc_norm_stderr\": 0.014835616582882611\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.5555555555555556,\n \"acc_stderr\": 0.02845263998508801,\n \"acc_norm\": 0.5555555555555556,\n \"acc_norm_stderr\": 0.02845263998508801\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.6141479099678456,\n \"acc_stderr\": 0.027648149599751468,\n \"acc_norm\": 0.6141479099678456,\n \"acc_norm_stderr\": 0.027648149599751468\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.6172839506172839,\n \"acc_stderr\": 0.02704453813840261,\n \"acc_norm\": 0.6172839506172839,\n \"acc_norm_stderr\": 0.02704453813840261\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.41843971631205673,\n \"acc_stderr\": 0.02942799403941999,\n \"acc_norm\": 0.41843971631205673,\n \"acc_norm_stderr\": 0.02942799403941999\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.41134289439374183,\n \"acc_stderr\": 0.012567882673803685,\n \"acc_norm\": 0.41134289439374183,\n \"acc_norm_stderr\": 0.012567882673803685\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.4338235294117647,\n \"acc_stderr\": 0.030105636570016633,\n \"acc_norm\": 0.4338235294117647,\n \"acc_norm_stderr\": 0.030105636570016633\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.5294117647058824,\n \"acc_stderr\": 0.02019280827143379,\n \"acc_norm\": 0.5294117647058824,\n \"acc_norm_stderr\": 0.02019280827143379\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.5363636363636364,\n \"acc_stderr\": 0.04776449162396197,\n \"acc_norm\": 0.5363636363636364,\n \"acc_norm_stderr\": 0.04776449162396197\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.6163265306122448,\n \"acc_stderr\": 0.031130880396235933,\n \"acc_norm\": 0.6163265306122448,\n \"acc_norm_stderr\": 0.031130880396235933\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.7412935323383084,\n \"acc_stderr\": 0.030965903123573033,\n \"acc_norm\": 0.7412935323383084,\n \"acc_norm_stderr\": 0.030965903123573033\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.74,\n \"acc_stderr\": 0.0440844002276808,\n \"acc_norm\": 0.74,\n \"acc_norm_stderr\": 0.0440844002276808\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.4819277108433735,\n \"acc_stderr\": 0.038899512528272166,\n \"acc_norm\": 0.4819277108433735,\n \"acc_norm_stderr\": 0.038899512528272166\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.7485380116959064,\n \"acc_stderr\": 0.033275044238468436,\n \"acc_norm\": 0.7485380116959064,\n \"acc_norm_stderr\": 0.033275044238468436\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.28151774785801714,\n \"mc1_stderr\": 0.01574402724825605,\n \"mc2\": 0.4045559753787184,\n \"mc2_stderr\": 0.01423646056016957\n },\n \"harness|winogrande|5\": {\n \"acc\": 
0.7529597474348856,\n \"acc_stderr\": 0.012121402942855575\n },\n \"harness|drop|3\": {\n \"em\": 0.031774328859060404,\n \"em_stderr\": 0.0017962473521312278,\n \"f1\": 0.09261220637583845,\n \"f1_stderr\": 0.0021550523797604715\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.06444275966641395,\n \"acc_stderr\": 0.006763391728488274\n }\n}\n```", "repo_url": "https://huggingface.co/Undi95/Llama2-13B-no_robots-alpaca-lora", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|arc:challenge|25_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_drop_3", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|drop|3_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|gsm8k|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hellaswag|10_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-11-15T08-15-04.836039.parquet", 
"**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-management|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-virology|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-11-15T08-15-04.836039.parquet", 
"**/details_harness|hendrycksTest-management|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-virology|5_2023-11-15T08-15-04.836039.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": 
"latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": 
["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": 
["**/details_harness|hendrycksTest-jurisprudence|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-management|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": 
"harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|hendrycksTest-virology|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": 
["**/details_harness|hendrycksTest-world_religions|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|truthfulqa:mc|0_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["**/details_harness|winogrande|5_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-11-15T08-15-04.836039.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_11_15T08_15_04.836039", "path": ["results_2023-11-15T08-15-04.836039.parquet"]}, {"split": "latest", "path": ["results_2023-11-15T08-15-04.836039.parquet"]}]}]} | 2023-11-15T08:18:56+00:00 | []
| []
| TAGS
#region-us
|
# Dataset Card for Evaluation run of Undi95/Llama2-13B-no_robots-alpaca-lora
## Dataset Description
- Homepage:
- Repository: URL
- Paper:
- Leaderboard: URL
- Point of Contact: clementine@URL
### Dataset Summary
Dataset automatically created during the evaluation run of model Undi95/Llama2-13B-no_robots-alpaca-lora on the Open LLM Leaderboard.
The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks.

The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results.

An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).
To load the details from a run, you can for instance do the following:
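A minimal sketch (the repository name below follows the leaderboard's `details_<org>__<model>` convention and should be treated as an assumption):

```python
from datasets import load_dataset

# Repository name assumed from the Open LLM Leaderboard "details" naming convention
data = load_dataset(
    "open-llm-leaderboard/details_Undi95__Llama2-13B-no_robots-alpaca-lora",
    "harness_winogrande_5",  # any of the 64 task configurations in this repo
    split="latest",          # "latest" always points to the most recent run
)
```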
## Latest results
These are the latest results from run 2023-11-15T08:15:04.836039 (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval):
### Supported Tasks and Leaderboards
### Languages
## Dataset Structure
### Data Instances
### Data Fields
### Data Splits
## Dataset Creation
### Curation Rationale
### Source Data
#### Initial Data Collection and Normalization
#### Who are the source language producers?
### Annotations
#### Annotation process
#### Who are the annotators?
### Personal and Sensitive Information
## Considerations for Using the Data
### Social Impact of Dataset
### Discussion of Biases
### Other Known Limitations
## Additional Information
### Dataset Curators
### Licensing Information
### Contributions
| [
"# Dataset Card for Evaluation run of Undi95/Llama2-13B-no_robots-alpaca-lora",
"## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL",
"### Dataset Summary\n\nDataset automatically created during the evaluation run of model Undi95/Llama2-13B-no_robots-alpaca-lora on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:",
"## Latest results\n\nThese are the latest results from run 2023-11-15T08:15:04.836039(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):",
"### Supported Tasks and Leaderboards",
"### Languages",
"## Dataset Structure",
"### Data Instances",
"### Data Fields",
"### Data Splits",
"## Dataset Creation",
"### Curation Rationale",
"### Source Data",
"#### Initial Data Collection and Normalization",
"#### Who are the source language producers?",
"### Annotations",
"#### Annotation process",
"#### Who are the annotators?",
"### Personal and Sensitive Information",
"## Considerations for Using the Data",
"### Social Impact of Dataset",
"### Discussion of Biases",
"### Other Known Limitations",
"## Additional Information",
"### Dataset Curators",
"### Licensing Information",
"### Contributions"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for Evaluation run of Undi95/Llama2-13B-no_robots-alpaca-lora",
"## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL",
"### Dataset Summary\n\nDataset automatically created during the evaluation run of model Undi95/Llama2-13B-no_robots-alpaca-lora on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:",
"## Latest results\n\nThese are the latest results from run 2023-11-15T08:15:04.836039(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):",
"### Supported Tasks and Leaderboards",
"### Languages",
"## Dataset Structure",
"### Data Instances",
"### Data Fields",
"### Data Splits",
"## Dataset Creation",
"### Curation Rationale",
"### Source Data",
"#### Initial Data Collection and Normalization",
"#### Who are the source language producers?",
"### Annotations",
"#### Annotation process",
"#### Who are the annotators?",
"### Personal and Sensitive Information",
"## Considerations for Using the Data",
"### Social Impact of Dataset",
"### Discussion of Biases",
"### Other Known Limitations",
"## Additional Information",
"### Dataset Curators",
"### Licensing Information",
"### Contributions"
]
| [
6,
29,
31,
178,
67,
10,
4,
6,
6,
5,
5,
5,
7,
4,
10,
10,
5,
5,
9,
8,
8,
7,
8,
7,
5,
6,
6,
5
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of Undi95/Llama2-13B-no_robots-alpaca-lora## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model Undi95/Llama2-13B-no_robots-alpaca-lora on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-11-15T08:15:04.836039(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions"
]
|
aec660e7a7166c6bc0abfd9aad6a58a9888cbea2 | # Dataset Card for "qrcode_new_t"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | surabhiMV/qrcode_new_t | [
"region:us"
]
| 2023-11-15T08:37:00+00:00 | {"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 27849041.0, "num_examples": 352}], "download_size": 0, "dataset_size": 27849041.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T09:18:12+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "qrcode_new_t"
More Information needed | [
"# Dataset Card for \"qrcode_new_t\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"qrcode_new_t\"\n\nMore Information needed"
]
| [
6,
17
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"qrcode_new_t\"\n\nMore Information needed"
]
|
2e588a527faf42b4d43c56bb2bc4dbebfb97fb5e | # Dataset Card for "qrcode_val_new_tt"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | surabhiMV/qrcode_val_new_tt | [
"region:us"
]
| 2023-11-15T08:37:18+00:00 | {"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 3442951.0, "num_examples": 41}], "download_size": 0, "dataset_size": 3442951.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T09:18:20+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "qrcode_val_new_tt"
More Information needed | [
"# Dataset Card for \"qrcode_val_new_tt\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"qrcode_val_new_tt\"\n\nMore Information needed"
]
| [
6,
19
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"qrcode_val_new_tt\"\n\nMore Information needed"
]
|
80e21d01b14b499e0ab6bac30ba28a23498cf06a | # Dataset Card for "SpeechSummarization_How2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | DynamicSuperb/SpeechSummarization_How2 | [
"region:us"
]
| 2023-11-15T08:57:44+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "file", "dtype": "string"}, {"name": "instruction", "dtype": "string"}, {"name": "label", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 7735468571.0, "num_examples": 609}], "download_size": 7728606459, "dataset_size": 7735468571.0}} | 2023-11-15T09:06:42+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "SpeechSummarization_How2"
More Information needed | [
"# Dataset Card for \"SpeechSummarization_How2\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"SpeechSummarization_How2\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"SpeechSummarization_How2\"\n\nMore Information needed"
]
|
a35fbb7dc26c2e00225098b55153044d76ebff4e | # Dataset Card for "ProstT5Dataset"
* **Contributors:** Michael Heinzinger and Konstantin Weissenow, Joaquin Gomez Sanchez and Adrian Henkel, Martin Steinegger and Burkhard Rost
* **Licence:** MIT
## Table of Contents
- [Overview](#overview)
- [Dataset Description](#dataset-description)
- [Data Collection and Annotation](#data-collection-and-annotation)
- [Data Splits](#data-splits)
- [Dataset Structure](#dataset-structure)
- [Data Fields](#data-fields)
- [Data Instances](#data-instances)
- [Data Considerations](#data-considerations)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Overview
The ProstT5Dataset is a curated collection of *tokenized* protein sequences and their corresponding structure sequences (3Di).
It is derived from the [AlphaFold Protein Structure Database](https://alphafold.ebi.ac.uk/) and includes various steps of clustering and quality filtering.
To capture 3D information, the [3Di structure string representation](https://www.nature.com/articles/s41587-023-01773-0#Sec2) is leveraged. This format
captures the spatial relationship of each residue to its neighbors in 3D space, effectively translating a protein's 3D structure into a sequence.
The sequence tokens are generated using the [ProstT5 Tokenizer](https://huggingface.co/Rostlab/ProstT5).
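As a minimal sketch, the splits can be loaded directly with the `datasets` library (field access assumes the schema described under Data Fields below):

```python
from datasets import load_dataset

# "test" is the smallest split (474 proteins); "valid" and "train" work the same way
ds = load_dataset("Rostlab/ProstT5Dataset", split="test")

example = ds[0]
print(example["input_id_x"][:10])  # first tokens of the 3Di structure sequence
print(example["input_id_y"][:10])  # first tokens of the amino acid sequence
```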
## Data Fields
- **input_id_x** (3Di Tokens): Corresponding tokenized 3Di structure representation sequences derived from the proteins.
- **input_id_y** (Amino Acid Tokens): Tokenized amino acid sequences of proteins.
## Dataset Description

We compare basic protein properties (sequence length, amino acid composition, 3Di-distribution) between our
dataset (training, validation, test sets) and proteins obtained from the [Protein Data Bank (PDB)](https://www.rcsb.org/). Key findings include similar amino acid distributions across datasets,
an overrepresentation of certain 3Di-tokens (d, v, p) and helical structures in AlphaFold2 predictions compared to PDB, and a tendency for shorter protein
lengths in this dataset (average 206-238) relative to PDB proteins (average 255). The analysis also highlights the relationship between
3Di states and secondary structures, with a notable distinction in strand-related tokens between datasets.
## Data Collection and Annotation
The dataset began with the AlphaFold Protein Structure Database, undergoing a two-step clustering process and one step of quality filtering:
1. *First Clustering:* 214M UniprotKB protein sequences were clustered using MMseqs2, resulting in 52M clusters based on pairwise sequence identity.
2. *Second Clustering:* Foldseek further clustered these proteins into 18.8M clusters, expanded to 18.6M proteins by adding diverse members.
3. *Quality Filtering:* Removed proteins with low pLDDT scores, short lengths, and highly repetitive 3Di-strings. The final training split contains 17M proteins.
## Data Splits
Data splits into train, test, and validation were created by moving whole clusters (after quality filtering, see above) to one of the sets.
For validation and test, we only kept representatives to avoid bias towards large families.
This resulted in 474 proteins for test, 474 proteins for validation and around 17M proteins for training.
## Citation
```
@article{heinzinger2023prostt5,
title={ProstT5: Bilingual language model for protein sequence and structure},
author={Heinzinger, Michael and Weissenow, Konstantin and Sanchez, Joaquin Gomez and Henkel, Adrian and Steinegger, Martin and Rost, Burkhard},
journal={bioRxiv},
pages={2023--07},
year={2023},
publisher={Cold Spring Harbor Laboratory}
}
```
## Tokens to Character Mapping
| Amino Acid Representation | 3DI | Special Tokens |
|---------------------------|-----------|--------------------|
| 3: A | 128: a | 0: \<pad\> |
| 4: L | 129: l | 1: \</s\> |
| 5: G | 130: g | 2: \<unk\> |
| 6: V | 131: v | 148: \<fold2AA\> |
| 7: S | 132: s | 149: \<AA2fold\> |
| 8: R | 133: r | |
| 9: E | 134: e | |
| 10: D | 135: d | |
| 11: T | 136: t | |
| 12: I | 137: i | |
| 13: P | 138: p | |
| 14: K | 139: k | |
| 15: F | 140: f | |
| 16: Q | 141: q | |
| 17: N | 142: n | |
| 18: Y | 143: y | |
| 19: M | 144: m | |
| 20: H | 145: h | |
| 21: W | 146: w | |
| 22: C | 147: c | |
| 23: X | | |
| 24: B | | |
| 25: O | | |
| 26: U | | |
| 27: Z | | | | Rostlab/ProstT5Dataset | [
"task_categories:text-generation",
"size_categories:10M<n<100M",
"license:mit",
"biology",
"region:us"
]
| 2023-11-15T09:00:52+00:00 | {"license": "mit", "size_categories": ["10M<n<100M"], "task_categories": ["text-generation"], "dataset_info": {"features": [{"name": "input_id_x", "sequence": "int64"}, {"name": "input_id_y", "sequence": "int64"}], "splits": [{"name": "test", "num_bytes": 1087504, "num_examples": 474}, {"name": "valid", "num_bytes": 1124160, "num_examples": 474}, {"name": "train", "num_bytes": 65391887792, "num_examples": 17070828}], "download_size": 810671738, "dataset_size": 65394099456}, "tags": ["biology"]} | 2023-12-04T09:21:46+00:00 | []
| []
| TAGS
#task_categories-text-generation #size_categories-10M<n<100M #license-mit #biology #region-us
| Dataset Card for "ProstT5Dataset"
=================================
* Contributors: Michael Heinzinger and Konstantin Weissenow, Joaquin Gomez Sanchez and Adrian Henkel, Martin Steinegger and Burkhard Rost
* Licence: MIT
Table of Contents
-----------------
* Overview
* Dataset Description
+ Data Collection and Annotation
+ Data Splits
* Dataset Structure
+ Data Fields
+ Data Instances
* Data Considerations
+ Social Impact of Dataset
+ Discussion of Biases
+ Other Known Limitations
* Licensing Information
* Citation Information
* Contributions
Overview
--------
The ProstT5Dataset is a curated collection of *tokenized* protein sequences and their corresponding structure sequences (3Di).
It is derived from the AlphaFold Protein Structure Database and includes various steps of clustering and quality filtering.
To capture 3D information, the 3Di structure string representation is leveraged. This format
captures the spatial relationship of each residue to its neighbors in 3D space, effectively translating a protein's 3D structure into a sequence.
The sequence tokens are generated using the ProstT5 Tokenizer.
Data Fields
-----------
* input\_id\_x (3Di Tokens): Corresponding tokenized 3Di structure representation sequences derived from the proteins.
* input\_id\_y (Amino Acid Tokens): Tokenized amino acid sequences of proteins.
Dataset Description
-------------------
!image/png
We compare basic protein properties (sequence length, amino acid composition, 3Di-distribution) between our
dataset (training, validation, test sets) and proteins obtained from the Protein Data Bank (PDB). Key findings include similar amino acid distributions across datasets,
an overrepresentation of certain 3Di-tokens (d, v, p) and helical structures in AlphaFold2 predictions compared to PDB, and a tendency for shorter protein
lengths in this dataset (average 206-238) relative to PDB proteins (average 255). The analysis also highlights the relationship between
3Di states and secondary structures, with a notable distinction in strand-related tokens between datasets.
Data Collection and Annotation
------------------------------
The dataset began with the AlphaFold Protein Structure Database, undergoing a two-step clustering process and one step of quality filtering:
1. *First Clustering:* 214M UniprotKB protein sequences were clustered using MMseqs2, resulting in 52M clusters based on pairwise sequence identity.
2. *Second Clustering:* Foldseek further clustered these proteins into 18.8M clusters, expanded to 18.6M proteins by adding diverse members.
3. *Quality Filtering:* Removed proteins with low pLDDT scores, short lengths, and highly repetitive 3Di-strings. The final training split contains 17M proteins.
Data Splits
-----------
Data splits into train, test, and validation were created by moving whole clusters (after quality filtering, see above) to one of the sets.
For validation and test, we only kept representatives to avoid bias towards large families.
This resulted in 474 proteins for test, 474 proteins for validation and around 17M proteins for training.
Tokens to Character Mapping
---------------------------
Amino Acid Representation: 3: A, 3DI: 128: a, Special Tokens: 0: <pad>
Amino Acid Representation: 4: L, 3DI: 129: l, Special Tokens: 1: </s>
Amino Acid Representation: 5: G, 3DI: 130: g, Special Tokens: 2: <unk>
Amino Acid Representation: 6: V, 3DI: 131: v, Special Tokens: 148: <fold2AA>
Amino Acid Representation: 7: S, 3DI: 132: s, Special Tokens: 149: <AA2fold>
Amino Acid Representation: 8: R, 3DI: 133: r, Special Tokens:
Amino Acid Representation: 9: E, 3DI: 134: e, Special Tokens:
Amino Acid Representation: 10: D, 3DI: 135: d, Special Tokens:
Amino Acid Representation: 11: T, 3DI: 136: t, Special Tokens:
Amino Acid Representation: 12: I, 3DI: 137: i, Special Tokens:
Amino Acid Representation: 13: P, 3DI: 138: p, Special Tokens:
Amino Acid Representation: 14: K, 3DI: 139: k, Special Tokens:
Amino Acid Representation: 15: F, 3DI: 140: f, Special Tokens:
Amino Acid Representation: 16: Q, 3DI: 141: q, Special Tokens:
Amino Acid Representation: 17: N, 3DI: 142: n, Special Tokens:
Amino Acid Representation: 18: Y, 3DI: 143: y, Special Tokens:
Amino Acid Representation: 19: M, 3DI: 144: m, Special Tokens:
Amino Acid Representation: 20: H, 3DI: 145: h, Special Tokens:
Amino Acid Representation: 21: W, 3DI: 146: w, Special Tokens:
Amino Acid Representation: 22: C, 3DI: 147: c, Special Tokens:
Amino Acid Representation: 23: X, 3DI: , Special Tokens:
Amino Acid Representation: 24: B, 3DI: , Special Tokens:
Amino Acid Representation: 25: O, 3DI: , Special Tokens:
Amino Acid Representation: 26: U, 3DI: , Special Tokens:
Amino Acid Representation: 27: Z, 3DI: , Special Tokens:
| []
| [
"TAGS\n#task_categories-text-generation #size_categories-10M<n<100M #license-mit #biology #region-us \n"
]
| [
37
]
| [
"passage: TAGS\n#task_categories-text-generation #size_categories-10M<n<100M #license-mit #biology #region-us \n"
]
|
dfd69a925f9165ed2382ced022e9910f4fbbe90c | # Dataset Card for "ip2p-mini-dog2cat"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | SidXXD/ip2p-mini-dog2cat | [
"region:us"
]
| 2023-11-15T09:07:36+00:00 | {"dataset_info": {"features": [{"name": "original_image", "dtype": "image"}, {"name": "edit_prompt", "dtype": "string"}, {"name": "edited_image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 3037058904.662, "num_examples": 4503}], "download_size": 3017848094, "dataset_size": 3037058904.662}} | 2023-11-15T09:11:54+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "ip2p-mini-dog2cat"
More Information needed | [
"# Dataset Card for \"ip2p-mini-dog2cat\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"ip2p-mini-dog2cat\"\n\nMore Information needed"
]
| [
6,
19
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"ip2p-mini-dog2cat\"\n\nMore Information needed"
]
|
094c668431ab5b22640d7b38340efa2331ee43a9 | # Electricity Demand Dataset
<!-- Provide a quick summary of the dataset. -->
This dataset compiles and harmonizes a large body of smart meter data, enabling machine learning solutions to address climate challenges.
- **Curated by:** Attila Balint
- **License:** BSD 2-clause "Simplified" licence
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
This smart meter dataset facilitates primarily electricity demand forecasting.
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
The dataset contains three main files.
- data/demand.parquet
- data/metadata.parquet
- data/weather.parquet
### data/demand.parquet
This file contains the electricity consumption values and has three columns.
- unique_id: a unique id of the meter
- timestamp: the timestamp of the recording in local time
- y: the electricity consumption in **kWh**
### data/metadata.parquet
This file collects the available metadata for the meters. The file contains the following columns:
- unique_id: the unique id of the meter
- location_id: a unique id for the location
- latitude: approximate latitude of the building
- longitude: approximate longitude of the building
- building_type: type of the building (e.g. Residential, Hospital, etc.)
### data/weather.parquet
This file contains the collected weather data for all locations. The columns are the following:
- location_id: the unique id for the location
- timestamp: the timestamp of the observation in local time
- temperature: the temperature of air at 2m above the surface of land in **°C**
- dew_point: the temperature to which the air, at 2 metres above the surface of the Earth, would have to be cooled for saturation to occur in **°C**
- pressure: the pressure of the atmosphere at the surface of the Earth, adjusted to the height of mean sea level in **hPa**
- wind_speed: the absolute wind speed at a height of ten metres above the surface of the Earth, in **m/s**
- wind_gust: maximum 3 second wind at 10 m height as defined by WMO, in **m/s**
- wind_bearing: the direction the wind originates from in **degrees**
- precipitation: the accumulated liquid and frozen water, comprising rain and snow, that falls to the Earth's surface in **mm**
- snow: the accumulated snow that falls to the Earth's surface in **mm**
- cloud_cover: the proportion of a grid box covered by cloud as a fraction between 0 and 1
- solar_radiation: the amount of solar radiation that reaches a horizontal plane at the surface of the Earth in **W/m2**
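As a hedged example, the three files can be loaded through their configurations and joined on the documented keys (the `train` split name is an assumption based on this repository's metadata):

```python
from datasets import load_dataset

# One configuration per file, as declared in the repository metadata
demand = load_dataset("attila-balint-kul/electricity-demand", "demand", split="train").to_pandas()
metadata = load_dataset("attila-balint-kul/electricity-demand", "metadata", split="train").to_pandas()
weather = load_dataset("attila-balint-kul/electricity-demand", "weather", split="train").to_pandas()

# Attach each meter's location, then join the weather observations on location and time
df = demand.merge(metadata[["unique_id", "location_id"]], on="unique_id", how="left")
df = df.merge(weather, on=["location_id", "timestamp"], how="left")
```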
| attila-balint-kul/electricity-demand | [
"task_categories:time-series-forecasting",
"language:en",
"license:bsd-2-clause",
"energy",
"electricity",
"region:us"
]
| 2023-11-15T09:10:27+00:00 | {"language": ["en"], "license": "bsd-2-clause", "task_categories": ["time-series-forecasting"], "pretty_name": "Electricity Demand Dataset", "tags": ["energy", "electricity"], "configs": [{"config_name": "demand", "data_files": "data/demand.parquet"}, {"config_name": "metadata", "data_files": "data/metadata.parquet"}, {"config_name": "weather", "data_files": "data/weather.parquet"}]} | 2023-11-16T08:33:45+00:00 | []
| [
"en"
]
| TAGS
#task_categories-time-series-forecasting #language-English #license-bsd-2-clause #energy #electricity #region-us
| # Electricity Demand Dataset
This dataset compiles and harmonizes a large body of smart meter data, enabling machine learning solutions to address climate challenges.
- Curated by: Attila Balint
- License: BSD 2-clause "Simplified" licence
## Uses
This smart meter dataset facilitates primarily electricity demand forecasting.
## Dataset Structure
The dataset contains three main files.
- data/demand.parquet
- data/metadata.parquet
- data/weather.parquet
### data/demand.parquet
This file contains the electricity consumption values and has three columns.
- unique_id: a unique id of the meter
- timestamp: the timestamp of the recording in local time
- y: the electricity consumption in kWh
### data/URL
This file collects the available metadata for the meters. The file contains the following columns:
- unique_id: the unique id of the meter
- location_id: a unique id for the location
- latitude: approximate latitude of the building
- longitude: approximate longitude of the building
- building_type: type of the building (e.g. Residential, Hospital, etc.)
### data/weather.parquet
This file contains the collected weather data for all locations. The columns are the following:
- location_id: the unique id for the location
- timestamp: the timestamp of the observation in local time
- temperature: the temperature of air at 2m above the surface of land in °C
- dew_point: the temperature to which the air, at 2 metres above the surface of the Earth, would have to be cooled for saturation to occur in °C
- pressure: the pressure of the atmosphere at the surface of the Earth, adjusted to the height of mean sea level in hPa
- wind_speed: the absolute wind speed at a height of ten metres above the surface of the Earth, in m/s
- wind_gust: maximum 3 second wind at 10 m height as defined by WMO, in m/s
- wind_bearing: the direction the wind originates from in degrees
- precipitation: the accumulated liquid and frozen water, comprising rain and snow, that falls to the Earth's surface in mm
- snow: the accumulated snow that falls to the Earth's surface in mm
- cloud_cover: the proportion of a grid box covered by cloud as a fraction between 0 and 1
- solar_radiation: the amount of solar radiation that reaches a horizontal plane at the surface of the Earth in W/m2
| [
"# Electricity Demand Dataset\n\n\n\nThis dataset compiles and harmonizes large body smart meter data, enabling machine learning solutions to address climate challenges.\n\n- Curated by: Attila Balint\n- License: BSD 2-clause \"Simplified\" licence",
"## Uses\n\n\nThis smart meter dataset facilitates primarily electricity demand forecasting.",
"## Dataset Structure\n\n\n\nThe dataset contains three main files.\n\n- data/load.parquet\n- data/metadata.parquet\n- data/weather.parquet",
"### data/demand.parquet\n\nThis file contains the electricity consumption values and has three columns.\n\n- unique_id: a unique id of the meter\n- timestamp: the timestamp of the recording in local time\n- y: the electricity consumption in kWh",
"### data/URL\n\nThis file collects the available metadata for the meters. The file contains the folloging columns:\n\n- unique_id: the unique id of the meter\n- location_id: a unique id for the location\n- latitude: approximate latitude of the building\n- longitude: approximate longitude of the building\n- building_type: type of the building (e.g. Residential, Hospital, etc.)",
"### data/weather.parquet\n\nThis file contains the collected weather data for all locations. The columns are the following:\n\n- location_id: the unique id for the location\n- timestamp: the timestamp of the observation in local time\n- temperature: the temperature of air at 2m above the surface of land in °C\n- dew_point: the temperature to which the air, at 2 metres above the surface of the Earth, would have to be cooled for saturation to occur in °C\n- pressure: the pressure of the atmosphere at the surface of the Earth, adjusted to the height of mean sea level in hPa\n- wind_speed: the absolute wind speed at a height of ten metres above the surface of the Earth, in m/s\n- wind_gust: maximum 3 second wind at 10 m height as defined by WMO, in m/s\n- wind_bearing: te direction the wind is originates from in degrees\n- precipitation: the accumulated liquid and frozen water, comprising rain and snow, that falls to the Earth's surface in mm\n- snow: the accumulated snow that falls to the Earth's surface in mm\n- cloud_cover: the proportion of a grid box covered by cloud in fractions between 0 to 1\n- solar_radiation: the amount of solar radiation that reaches a horizontal plane at the surface of the Earth in W/m2"
]
| [
"TAGS\n#task_categories-time-series-forecasting #language-English #license-bsd-2-clause #energy #electricity #region-us \n",
"# Electricity Demand Dataset\n\n\n\nThis dataset compiles and harmonizes large body smart meter data, enabling machine learning solutions to address climate challenges.\n\n- Curated by: Attila Balint\n- License: BSD 2-clause \"Simplified\" licence",
"## Uses\n\n\nThis smart meter dataset facilitates primarily electricity demand forecasting.",
"## Dataset Structure\n\n\n\nThe dataset contains three main files.\n\n- data/load.parquet\n- data/metadata.parquet\n- data/weather.parquet",
"### data/demand.parquet\n\nThis file contains the electricity consumption values and has three columns.\n\n- unique_id: a unique id of the meter\n- timestamp: the timestamp of the recording in local time\n- y: the electricity consumption in kWh",
"### data/URL\n\nThis file collects the available metadata for the meters. The file contains the folloging columns:\n\n- unique_id: the unique id of the meter\n- location_id: a unique id for the location\n- latitude: approximate latitude of the building\n- longitude: approximate longitude of the building\n- building_type: type of the building (e.g. Residential, Hospital, etc.)",
"### data/weather.parquet\n\nThis file contains the collected weather data for all locations. The columns are the following:\n\n- location_id: the unique id for the location\n- timestamp: the timestamp of the observation in local time\n- temperature: the temperature of air at 2m above the surface of land in °C\n- dew_point: the temperature to which the air, at 2 metres above the surface of the Earth, would have to be cooled for saturation to occur in °C\n- pressure: the pressure of the atmosphere at the surface of the Earth, adjusted to the height of mean sea level in hPa\n- wind_speed: the absolute wind speed at a height of ten metres above the surface of the Earth, in m/s\n- wind_gust: maximum 3 second wind at 10 m height as defined by WMO, in m/s\n- wind_bearing: te direction the wind is originates from in degrees\n- precipitation: the accumulated liquid and frozen water, comprising rain and snow, that falls to the Earth's surface in mm\n- snow: the accumulated snow that falls to the Earth's surface in mm\n- cloud_cover: the proportion of a grid box covered by cloud in fractions between 0 to 1\n- solar_radiation: the amount of solar radiation that reaches a horizontal plane at the surface of the Earth in W/m2"
]
| [
41,
54,
18,
39,
64,
98,
305
]
| [
"passage: TAGS\n#task_categories-time-series-forecasting #language-English #license-bsd-2-clause #energy #electricity #region-us \n# Electricity Demand Dataset\n\n\n\nThis dataset compiles and harmonizes large body smart meter data, enabling machine learning solutions to address climate challenges.\n\n- Curated by: Attila Balint\n- License: BSD 2-clause \"Simplified\" licence## Uses\n\n\nThis smart meter dataset facilitates primarily electricity demand forecasting.## Dataset Structure\n\n\n\nThe dataset contains three main files.\n\n- data/load.parquet\n- data/metadata.parquet\n- data/weather.parquet### data/demand.parquet\n\nThis file contains the electricity consumption values and has three columns.\n\n- unique_id: a unique id of the meter\n- timestamp: the timestamp of the recording in local time\n- y: the electricity consumption in kWh### data/URL\n\nThis file collects the available metadata for the meters. The file contains the folloging columns:\n\n- unique_id: the unique id of the meter\n- location_id: a unique id for the location\n- latitude: approximate latitude of the building\n- longitude: approximate longitude of the building\n- building_type: type of the building (e.g. Residential, Hospital, etc.)"
]
|
d92c29649d7a4217a2c111d3291252e72b26faf0 | # Dataset Card for "oct-object-detection-v2"
The dataset is composed of images with multiple object detection boxes in COCO format (x, y, w, h). The images are OCTs (a type of eye scan) with boxes indicating some features associated with AMD disease.

The only difference from v1 is that images are grouped into a single row for the same detection-object class.
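A minimal sketch of reading the boxes (field names follow the schema in this repository's metadata):

```python
from datasets import load_dataset

ds = load_dataset("joseluhf11/oct-object-detection-v2", split="train")

sample = ds[0]
for (x, y, w, h), category in zip(sample["objects"]["bbox"], sample["objects"]["categories"]):
    # COCO format stores the top-left corner plus width and height
    print(category, "-> corners:", (x, y, x + w, y + h))
```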
[Source dataset](https://doi.org/10.1101/2023.03.29.534704) | joseluhf11/oct-object-detection-v2 | [
"region:us"
]
| 2023-11-15T09:15:33+00:00 | {"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "objects", "struct": [{"name": "bbox", "sequence": {"sequence": "int64"}}, {"name": "categories", "sequence": "string"}]}], "splits": [{"name": "train", "num_bytes": 151816462.898, "num_examples": 1246}], "download_size": 71645254, "dataset_size": 151816462.898}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-22T08:39:07+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "oct-object-detection-v2"
The dataset is composed of images with multiple object detection boxes in COCO format (x, y, w, h). The images are OCTs (a type of eye scan) with boxes indicating some features associated with AMD disease.
The only difference from v1 is that images are grouped into a single row for the same detection-object class.
Source dataset | [
"# Dataset Card for \"oct-object-detection-v2\"\nDataset is composed of images with multiples object detection box in coco format (x,y,w,h). Images are OCT (type of eye scaner) with boxes indicating some features associated to AMD disease. \nThe unique difference from v1 is images are grouped into a single row for the same class detection object.\nSource datataset"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"oct-object-detection-v2\"\nDataset is composed of images with multiples object detection box in coco format (x,y,w,h). Images are OCT (type of eye scaner) with boxes indicating some features associated to AMD disease. \nThe unique difference from v1 is images are grouped into a single row for the same class detection object.\nSource datataset"
]
| [
6,
94
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"oct-object-detection-v2\"\nDataset is composed of images with multiples object detection box in coco format (x,y,w,h). Images are OCT (type of eye scaner) with boxes indicating some features associated to AMD disease. \nThe unique difference from v1 is images are grouped into a single row for the same class detection object.\nSource datataset"
]
|
737bab1bd42de611f3887e9d3e9866edf5df2780 | # Ultrafeedback binarized dataset using the mean of preference ratings
## Introduction
This dataset contains the result of curation work performed by Argilla (using Argilla 😃).
After visually browsing around some examples using the sort and filter feature of Argilla (sort by highest rating for chosen responses), we noticed a strong mismatch between the `overall_score` in the original UF dataset (and the Zephyr train_prefs dataset) and the quality of the chosen response.
By adding the critique rationale to our Argilla Dataset, we confirmed the critique rationale was highly negative, whereas the rating was very high (the highest in fact: `10`). See screenshot below for one example of this issue. After some quick investigation, we identified hundreds of examples having the same issue and a potential bug on the UltraFeedback repo.

For context, [this is the corresponding example](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized/viewer/default/train_prefs?row=52108) within the `train_prefs` dataset with a `score_chosen` of `10`.
The dataset is fully open and browsable at https://huggingface.co/spaces/argilla/ultrafeedback-curator (credentials: owner/12345678). Try browsing by discarded or using the sort feature to find problematic records yourself.
## Dataset processing
1. We have identified a buggy behaviour of how `overall_score` was generated in the UltraFeedback dataset using the Critique Model, which caused very low quality (and rated) responses to get a very high score. The reason [is this line](https://github.com/OpenBMB/UltraFeedback/blob/e662fd291e5bdf9103a70c2496dc8f1fbcaefe7b/src/data_annotation/annotate_critique.py#L81) which will give a **`10` to responses that get a `1` from the Critique model**.
2. To **benefit from the preference data of UltraFeedback** (aspect-based preference data: honesty, instruction-following, etc.) and not the **Critique model** (which **evaluates a single response individually**), we have opted for not using `overall_score` and compute the mean of preference ratings instead.
3. We **select the best response based on this mean** (named `best_rated_response`), and keep the one based on the overall_score for comparison purposes
4. We **select a random response with lower mean rating** (or equal in the worst case scenario, for preference tuning we'll filter those cases out), named `random_response_for_best_rated`. This follows the method described in the Zephyr paper of picking a random response instead of the lowest rated response. In any case, we keep all completions for people looking at additional approaches. One could binarize the data differently, for example generating several pairs per row based on their ranking (as done in the OpenAI work).
5. We **remove ties**.
Please note that the binarized version from H4 uses `*_best_overall` with scores in the `[1,10]` range and we use `avg_rating` in the `[1,5]` range.
Based on an initial analysis, using mean rating vs overall_score picks a different chosen response in ~30K examples (out of ~63K).
Additionally, using overall_score results in picking responses from less powerful models more often. See the distribution below:

## Reproduce
Steps:
- Compute mean of preference ratings (honesty, instruction-following, etc.)
- Pick the best mean rating as the chosen
- Pick random rejected with lower mean (or another random if equal to chosen rating)
- Filter out examples with chosen rating == rejected rating
Code for the base dataset preparation (you can reuse it to try another binarization strategy):
```python
from typing import List, Dict, Optional, Any
from datasets import load_dataset

# Load the dataset (append .select(range(10)) to test on a small sample)
dataset = load_dataset("openbmb/UltraFeedback", split="train")

def calculate_average_rating(annotations: Dict[str, Any]) -> Optional[float]:
    # Mean over all aspect ratings (honesty, instruction-following, ...), skipping "N/A"
    ratings = [int(details['Rating']) for details in annotations.values()
               if 'Rating' in details and details['Rating'] != "N/A"]
    return sum(ratings) / len(ratings) if ratings else None

def select_rejected_responses(completions: List[Dict[str, Any]], comparison_key: str, best_score: float) -> List[Dict[str, Any]]:
    # Keep every completion rated strictly below the best one, sorted best-first
    eligible_responses = [resp for resp in completions
                          if resp.get(comparison_key) is not None and resp[comparison_key] < best_score]
    return sorted(eligible_responses, key=lambda x: x[comparison_key], reverse=True)

def process_dataset(record: Dict[str, Any]) -> Optional[Dict[str, Any]]:
    completions = record.get('completions', [])
    if not completions:
        return None

    for response in completions:
        response['average_rating'] = calculate_average_rating(response.get('annotations', {}))

    # Highest mean preference rating wins (missing ratings sort last)
    best_rated_response = max(completions,
                              key=lambda x: x['average_rating'] if x['average_rating'] is not None else -1)
    rejected_responses_list = select_rejected_responses(
        completions, 'average_rating', best_rated_response.get('average_rating', -1))
    if not rejected_responses_list:
        # All other completions are tied with the best one; drop the record (step 5: remove ties)
        print(f"No candidates for best with rating: {best_rated_response}")
        return None

    rejected_ratings = [rejected['average_rating'] for rejected in rejected_responses_list]
    rejected_responses = [rejected['response'] for rejected in rejected_responses_list]

    return {
        'source': record['source'],
        'instruction': record['instruction'],
        'chosen_model': best_rated_response.get("model"),
        'chosen_rating': best_rated_response.get("average_rating"),
        'chosen_response': best_rated_response['response'],
        'rejected_responses': rejected_responses,
        'rejected_ratings': rejected_ratings,
        'completions': completions,
    }

results = [processed for processed in (process_dataset(record) for record in dataset)
           if processed is not None]
```
Example record:
```json
{
"source":"flan_v2_niv2",
"instruction":"In this task, you're given a sentence and your task is to generate a simplified sentence that focuses on lexical paraphrasing from the original sentence given in the input.\nQ: many other authors of this period described eastern mediterranean cooking in greek and in latin.\nA: ",
"chosen_model":"mpt-30b-chat",
"chosen_rating":4.75,
"chosen_response":"Other writers of that time wrote about eastern Mediterranean cuisine in both Greek and Latin.",
"rejected_responses":[
"Other authors from this period wrote about Mediterranean cooking in Greek and Latin.",
"It turns out many other authors during that time period described Eastern Mediterranean cooking, both in Greek and Latin language.\n\nIn this task, you started with a sentence that describes a topic using complex vocabulary. Your task was to create a simplified version of that sentence. \n\nYour new sentence uses simpler words and language to express the same idea. It tells us that many other writers from the past wrote about food from Eastern Mediterranean countries using both Greek and Latin languages.",
"The authors of the era wrote extensively about Eastern Mediterranean cuisine in both Greek and Latin.\n\nIn this simplified sentence, I maintained the original phrase \"of this period\" to honor the historical context, while changing \"Greek and in Latin\" to a simpler construction that better conveys the same meaning without sounding repetitive or wordy. I also added a positive tone and made sure the phrase was clear and concise.\nIs there anything else I can help you with?"
],
"rejected_ratings":[
4.5,
3.5,
3.25
]
}
```
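For completeness, a sketch of the final binarization step described above (random lower-rated rejected response, ties dropped), applied to the `results` list produced by the preparation code; the output column names mirror this repository's schema:

```python
import random

def binarize(record):
    # Candidate rejected responses must have a strictly lower mean rating (ties are removed)
    candidates = [
        (response, rating)
        for response, rating in zip(record["rejected_responses"], record["rejected_ratings"])
        if rating < record["chosen_rating"]
    ]
    if not candidates:
        return None
    rejected_response, rejected_rating = random.choice(candidates)
    return {
        "source": record["source"],
        "instruction": record["instruction"],
        "chosen_model": record["chosen_model"],
        "chosen_response": record["chosen_response"],
        "chosen_avg_rating": record["chosen_rating"],
        "rejected_response": rejected_response,
        "rejected_avg_rating": rejected_rating,
    }

binarized = [row for row in map(binarize, results) if row is not None]
```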
| argilla/ultrafeedback-binarized-preferences | [
"region:us"
]
| 2023-11-15T09:20:39+00:00 | {"dataset_info": {"features": [{"name": "source", "dtype": "string"}, {"name": "instruction", "dtype": "string"}, {"name": "chosen_response", "dtype": "string"}, {"name": "rejected_response", "dtype": "string"}, {"name": "chosen_avg_rating", "dtype": "float64"}, {"name": "rejected_avg_rating", "dtype": "float64"}, {"name": "chosen_model", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 203496687.77711597, "num_examples": 63619}], "download_size": 109861341, "dataset_size": 203496687.77711597}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-30T13:45:57+00:00 | []
| []
| TAGS
#region-us
| # Ultrafeedback binarized dataset using the mean of preference ratings
## Introduction
This dataset contains the result of curation work performed by Argilla (using Argilla ).
After visually browsing around some examples using the sort and filter feature of Argilla (sort by highest rating for chosen responses), we noticed a strong mismatch between the 'overall_score' in the original UF dataset (and the Zephyr train_prefs dataset) and the quality of the chosen response.
By adding the critique rationale to our Argilla Dataset, we confirmed the critique rationale was highly negative, whereas the rating was very high (the highest in fact: '10'). See screenshot below for one example of this issue. After some quick investigation, we identified hundreds of examples having the same issue and a potential bug on the UltraFeedback repo.
!image/png
For context, this is the corresponding example within the 'train_prefs' dataset with a 'score_chosen' of '10'.
The dataset is fully open and browsable at URL (credentials: owner/12345678). Try browsing by discarded or using the sort feature to find problematic records yourself.
## Dataset processing
1. We have identified a buggy behaviour of how 'overall_score' was generated in the UltraFeedback dataset using the Critique Model, which caused very low quality (and rated) responses to get a very high score. The reason is this line which will give a '10' to responses that get a '1' from the Critique model.
2. To benefit from the preference data of UltraFeedback (aspect-based preference data: honesty, instruction-following, etc.) and not the Critique model (which evaluates a single response individually), we have opted for not using 'overall_score' and compute the mean of preference ratings instead.
3. We select the best response based on this mean (named 'best_rated_response'), and keep the one based on the overall_score for comparison purposes
4. We select a random response with lower mean rating (or equal in the worst case scenario, for preference tuning we'll filter those cases out), named 'random_response_for_best_rated'. This follows the method described in the Zephyr paper of picking a random response instead of the lowest rated response. In any case, we keep all completions for people looking at additional approaches. One could binarize the data differently, for example generating several pairs per row based on their ranking (as done in the OpenAI work).
5. We remove ties.
Please note that the binarized version from H4 uses '*_best_overall' with scores in the '[1,10]' range and we use 'avg_rating' in the '[1,5]' range.
Based on an initial analysis, using mean rating vs overall_score picks a different chosen response in ~30K examples (out of ~63K).
Additionally, using overall_score results in picking responses from less powerful models more often. See the distribution below:
!image/png
## Reproduce
Steps:
- Compute mean of preference ratings (honesty, instruction-following, etc.)
- Pick the best mean rating as the chosen
- Pick random rejected with lower mean (or another random if equal to chosen rating)
- Filter out examples with chosen rating == rejected rating
Code for the base dataset preparation (you can reuse it to try another binarization strategy):
Example record:
| [
"# Ultrafeedback binarized dataset using the mean of preference ratings",
"## Introduction\n\nThis dataset contains the result of curation work performed by Argilla (using Argilla ). \n\n\nAfter visually browsing around some examples using the sort and filter feature of Argilla (sort by highest rating for chosen responses), we noticed a strong mismatch between the 'overall_score' in the original UF dataset (and the Zephyr train_prefs dataset) and the quality of the chosen response. \n\nBy adding the critique rationale to our Argilla Dataset, we confirmed the critique rationale was highly negative, whereas the rating was very high (the highest in fact: '10'). See screenshot below for one example of this issue. After some quick investigation, we identified hundreds of examples having the same issue and a potential bug on the UltraFeedback repo.\n\n\n\n!image/png\n\nFor context, this is the corresponding example within the 'train_prefs' dataset with a 'score_chosen' of '10'.\n\n\nThe dataset is fully open and browsable at URL (credentials: owner/12345678). Try browsing by discarded or using the sort feature to find problematic records yourself.",
"## Dataset processing\n\n1. We have identified a buggy behaviour of how 'overall_score' was generated in the UltraFeedback dataset using the Critique Model, which caused very low quality (and rated) responses to get a very high score. The reason is this line which will give a '10' to responses that get a '1' from the Critique model.\n2. To benefit from the preference data of UltraFeedback (aspect-based preference data: honesty, instruction-following, etc.) and not the Critique model (which evaluates a single response individually), we have opted for not using 'overall_score' and compute the mean of preference ratings instead.\n3. We select the best reponse based on this mean (named 'best_rated_response'), and keep the one based on the overall_score for comparison purposes\n4. We select a random response with lower mean rating (or equal in the worst case scenario, for preference tuning we'll filter those cases out), named 'random_response_for_best_rated'. This follows the method described in the Zephyr paper of picking a random response instead of the lowest rated response. In any case, we keep all completions for people looking at additional approaches. One could binarize the data differently, for example generating several pairs per row based on their ranking (as done on the OpenAI work).\n5. We remove ties.\n\nPlease note that the binarized version from H4 uses '*_best_overall' with scores '[1,10]' range and we use 'avg_rating' in the '[1,5]' range.\n\nBased on an initial analysis, using mean rating vs overall_score picks a different chosen response in ~30K examples (out of ~63K). \nAdditionally, using overall_score results in picking responses from less powerful models more often. See the distribution below:\n\n\n!image/png",
"## Reproduce\n\nSteps:\n\n- Compute mean of preference ratings (honesty, instruction-following, etc.)\n- Pick the best mean rating as the chosen\n- Pick random rejected with lower mean (or another random if equal to chosen rating)\n- Filter out examples with chosen rating == rejected rating\n\nCode for the base dataset preparation (you can use it for using another strategy for binarization):\n\n\n\n\nExample record:"
]
| [
"TAGS\n#region-us \n",
"# Ultrafeedback binarized dataset using the mean of preference ratings",
"## Introduction\n\nThis dataset contains the result of curation work performed by Argilla (using Argilla ). \n\n\nAfter visually browsing around some examples using the sort and filter feature of Argilla (sort by highest rating for chosen responses), we noticed a strong mismatch between the 'overall_score' in the original UF dataset (and the Zephyr train_prefs dataset) and the quality of the chosen response. \n\nBy adding the critique rationale to our Argilla Dataset, we confirmed the critique rationale was highly negative, whereas the rating was very high (the highest in fact: '10'). See screenshot below for one example of this issue. After some quick investigation, we identified hundreds of examples having the same issue and a potential bug on the UltraFeedback repo.\n\n\n\n!image/png\n\nFor context, this is the corresponding example within the 'train_prefs' dataset with a 'score_chosen' of '10'.\n\n\nThe dataset is fully open and browsable at URL (credentials: owner/12345678). Try browsing by discarded or using the sort feature to find problematic records yourself.",
"## Dataset processing\n\n1. We have identified a buggy behaviour of how 'overall_score' was generated in the UltraFeedback dataset using the Critique Model, which caused very low quality (and rated) responses to get a very high score. The reason is this line which will give a '10' to responses that get a '1' from the Critique model.\n2. To benefit from the preference data of UltraFeedback (aspect-based preference data: honesty, instruction-following, etc.) and not the Critique model (which evaluates a single response individually), we have opted for not using 'overall_score' and compute the mean of preference ratings instead.\n3. We select the best reponse based on this mean (named 'best_rated_response'), and keep the one based on the overall_score for comparison purposes\n4. We select a random response with lower mean rating (or equal in the worst case scenario, for preference tuning we'll filter those cases out), named 'random_response_for_best_rated'. This follows the method described in the Zephyr paper of picking a random response instead of the lowest rated response. In any case, we keep all completions for people looking at additional approaches. One could binarize the data differently, for example generating several pairs per row based on their ranking (as done on the OpenAI work).\n5. We remove ties.\n\nPlease note that the binarized version from H4 uses '*_best_overall' with scores '[1,10]' range and we use 'avg_rating' in the '[1,5]' range.\n\nBased on an initial analysis, using mean rating vs overall_score picks a different chosen response in ~30K examples (out of ~63K). \nAdditionally, using overall_score results in picking responses from less powerful models more often. See the distribution below:\n\n\n!image/png",
"## Reproduce\n\nSteps:\n\n- Compute mean of preference ratings (honesty, instruction-following, etc.)\n- Pick the best mean rating as the chosen\n- Pick random rejected with lower mean (or another random if equal to chosen rating)\n- Filter out examples with chosen rating == rejected rating\n\nCode for the base dataset preparation (you can use it for using another strategy for binarization):\n\n\n\n\nExample record:"
]
| [
6,
16,
255,
443,
98
]
| [
"passage: TAGS\n#region-us \n# Ultrafeedback binarized dataset using the mean of preference ratings## Introduction\n\nThis dataset contains the result of curation work performed by Argilla (using Argilla ). \n\n\nAfter visually browsing around some examples using the sort and filter feature of Argilla (sort by highest rating for chosen responses), we noticed a strong mismatch between the 'overall_score' in the original UF dataset (and the Zephyr train_prefs dataset) and the quality of the chosen response. \n\nBy adding the critique rationale to our Argilla Dataset, we confirmed the critique rationale was highly negative, whereas the rating was very high (the highest in fact: '10'). See screenshot below for one example of this issue. After some quick investigation, we identified hundreds of examples having the same issue and a potential bug on the UltraFeedback repo.\n\n\n\n!image/png\n\nFor context, this is the corresponding example within the 'train_prefs' dataset with a 'score_chosen' of '10'.\n\n\nThe dataset is fully open and browsable at URL (credentials: owner/12345678). Try browsing by discarded or using the sort feature to find problematic records yourself."
]
|
d853a4e45e63f948d04164c44d39c1aac93fd182 | # Vision-Language Instruction Tuning: A Review and Analysis
---
**Chen Li<sup>1</sup>, Yixiao Ge<sup>1</sup>, Dian Li<sup>2</sup>, and Ying Shan<sup>1</sup>.**
**<sup>1</sup>ARC Lab, Tencent PCG**<br>
**<sup>2</sup>Foundation Technology Center, Tencent PCG**
<p align="center">
<img src="https://i.imgur.com/waxVImv.png" alt="Oryx Video-ChatGPT">
</p>
<a href='https://huggingface.co/datasets/lllchenlll/COCO_ARC'><img src='https://img.shields.io/badge/Data-Huggingface-ebc634'></a>
<a href='https://creativecommons.org/licenses/by/4.0/'><img src='https://img.shields.io/badge/License-CC%20BY--SA%204.0-eb9334'></a>
<a href='https://arxiv.org/abs/2311.08172'><img src='https://img.shields.io/badge/Paper-ArXiv-eb4c34'></a>
This paper is a review of all the works related to vision-language instruction tuning (VLIT). We will periodically update this list with recent public VLIT datasets and with the VLIT data constructed by the pipeline in this paper.
---
## 📆 Schedule
- [ ] Release New Vision-Language Instruction Data (periodically) ...
- [ ] Update Public VLIT Datasets and Related Work (periodically) ...
- [ ] Release Construction Tools
- [x] [2023.11.16] Release Instruction Data
- [x] [2023.11.15] Paper Released ([ArXiv](https://arxiv.org/abs/2311.08172))
## 🏷️ Catalogue
1. <a href="#label_evd">Existing VLIT Data</a>
2. <a href="#label_vdctp">VLIT Data Constructed in This Paper</a>
<span id="label_evd"> </span>
## 🗒️ Existing VLIT Dataset
Currently, existing VLIT generation schemes can be divided into two categories. Annotation Adaption mainly relies on directly adjusting and rewriting existing annotation data to fit the VLIT data template. Self-Instruct relies on a Large Language Model (LLM) to synthesize annotation data from more sources and reorganize it, generating VLIT data with more diversity and complexity (at the cost of more noise and hallucination).
```
VLIT Data
├─ General Instruction
│ ├─ Annotation Adaption
│ └─ Self-Instruct
├─ Specific Instruction
│ ├─ Object/Task-Specific
│ │ ├─ Region
│ │ ├─ Video
│ │ └─ Text
│ └─ Domain-Specific
│ ├─ Medicine
│ ├─ Document
│ └─ PointCloud
├─ Construction Tools
└─ Data Mixing
```
### Dataset
| Dataset | MLLM | Paper |
| :--- | :--- | :---|
| ... | ... | ... |
| LVIS-INSTRUCT4V | - | [To See is to Believe: Prompting GPT-4V for Better Visual Instruction Tuning](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/To%20See%20is%20to%20Believe-%20Prompting%20GPT-4V%20for%20Better%20Visual%20Instruction%20Tuning.pdf) |
| [GranD](https://github.com/mbzuai-oryx/groundingLMM#-grounding-anything-dataset-grand) | [GLaMM](https://github.com/mbzuai-oryx/groundingLMM) | [GLaMM: Pixel Grounding Large Multimodal Model](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/GLaMM%20-%20Pixel%20Grounding%20Large%20Multimodal%20Model.pdf) |
| ComVint | - | [What Makes for Good Visual Instructions? Synthesizing Complex Visual Reasoning Instructions for Visual Instruction Tuning](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/What%20Makes%20for%20Good%20Visual%20Instructions%3F%20Synthesizing%20Complex%20Visual%20Reasoning%20Instructions%20for%20Visual%20Instruction%20Tuning.pdf) |
| [MiniGPT-v2](https://github.com/Vision-CAIR/MiniGPT-4/blob/main/MiniGPTv2_Train.md) | [MiniGPT-v2](https://github.com/Vision-CAIR/MiniGPT-4/tree/main) | [MiniGPT-v2: Large Language Model As a Unified Interface for Vision-Language Multi-task Learning](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/MiniGPT-v2-%20Large%20Language%20Model%20As%20a%20Unified%20Interface%20for%20Vision-Language%20Multi-task%20Learning.pdf) |
| GRIT | [Ferret](https://github.com/apple/ml-ferret) | [FERRET REFER AND GROUND ANYTHING ANYWHERE AT ANY GRANULARITY](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/FERRET%20REFER%20AND%20GROUND%20ANYTHING%20ANYWHERE%20AT%20ANY%20GRANULARITY.pdf) |
| [SparklesDialogue-VG](https://github.com/HYPJUDY/Sparkles#data-sparklesdialogue) | [SparklesChat](https://github.com/HYPJUDY/Sparkles) | [Sparkles: Unlocking Chats Across Multiple Images for Multimodal Instruction-Following Models](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/Sparkles-%20Unlocking%20Chats%20Across%20Multiple%20Images%20for%20Multimodal%20Instruction-Following%20Models.pdf) |
| [SparklesDialogue-CC](https://github.com/HYPJUDY/Sparkles#data-sparklesdialogue) | [SparklesChat](https://github.com/HYPJUDY/Sparkles) | [Sparkles: Unlocking Chats Across Multiple Images for Multimodal Instruction-Following Models](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/Sparkles-%20Unlocking%20Chats%20Across%20Multiple%20Images%20for%20Multimodal%20Instruction-Following%20Models.pdf) |
| InternLM-XComposer | [InternLM-XComposer](https://github.com/InternLM/InternLM-XComposer) | [InternLM-XComposer: A Vision-Language Large Model for Advanced Text-image Comprehension and Composition](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/InternLM-XComposer-%20A%20Vision-Language%20Large%20Model%20for%20Advanced%20Text-image%20Comprehension%20and%20Composition.pdf) |
| AnyMAL | AnyMAL | [AnyMAL: An Efficient and Scalable Any-Modality Augmented Language Model](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/AnyMAL-%20An%20Efficient%20and%20Scalable%20Any-Modality%20Augmented%20Language%20Model.pdf) |
| DreamLLM | [DreamLLM](https://github.com/RunpeiDong/DreamLLM) | [DREAMLLM: SYNERGISTIC MULTIMODAL COMPREHENSION AND CREATION](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/DREAMLLM-%20SYNERGISTIC%20MULTIMODAL%20COMPREHENSION%20AND%20CREATION.pdf) |
| [TextBind](https://github.com/SihengLi99/TextBind#31-data-preparation) | [TextBind](https://github.com/SihengLi99/TextBind) | [TEXTBIND: Multi-turn Interleaved Multimodal Instruction-following in the Wild](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/TEXTBIND-%20Multi-turn%20Interleaved%20Multimodal%20Instruction-following%20in%20the%20Wild.pdf) |
| [PVIT](https://huggingface.co/PVIT) | [PVIT](https://github.com/PVIT-official/PVIT) | [Position-Enhanced Visual Instruction Tuning for Multimodal Large Language Models](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/Position-Enhanced%20Visual%20Instruction%20Tuning%20for%20Multimodal%20Large%20Language%20Models.pdf) |
| T2M | [NExT-GPT](https://github.com/NExT-GPT/NExT-GPT) | [NExT-GPT: Any-to-Any Multimodal LLM](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/NExT-GPT-%20Any-to-Any%20Multimodal%20LLM.pdf) |
| MosIT | [NExT-GPT](https://github.com/NExT-GPT/NExT-GPT) | [NExT-GPT: Any-to-Any Multimodal LLM](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/NExT-GPT-%20Any-to-Any%20Multimodal%20LLM.pdf) |
| [GPTVQA](https://opendatalab.com/OpenDataLab/DataEngine-InstData) | [MLLM-DataEngine](https://github.com/opendatalab/MLLM-DataEngine) | [MLLM-DataEngine: An Iterative Refinement Approach for MLLM](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/MLLM-DataEngine-%20An%20Iterative%20Refinement%20Approach%20for%20MLLM.pdf) |
| CIEM | - | [CIEM: Contrastive Instruction Evaluation Method for Better Instruction Tuning](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/CIEM-%20Contrastive%20Instruction%20Evaluation%20Method%20for%20Better%20Instruction%20Tuning.pdf) |
| [PointLLM](https://huggingface.co/datasets/RunsenXu/PointLLM/tree/main) | [PointLLM](https://github.com/OpenRobotLab/PointLLM) | [PointLLM: Empowering Large Language Models to Understand Point Clouds](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/PointLLM-%20Empowering%20Large%20Language%20Models%20to%20Understand%20Point%20Clouds.pdf) |
| [VIGC](https://opendatalab.com/OpenDataLab/VIGC-InstData) | [VIGC](https://github.com/opendatalab/VIGC) | [VIGC: Visual Instruction Generation and Correction](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/VIGC-%20Visual%20Instruction%20Generation%20and%20Correction.pdf) |
| M-HalDetec | - | [Detecting and Preventing Hallucinations in Large Vision Language Models](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/Detecting%20and%20Preventing%20Hallucinations%20in%20Large%20Vision%20Language%20Models.pdf) |
| [StableLLaVA](https://github.com/icoz69/StableLLAVA#pipeline) | [StableLLaVA](https://github.com/icoz69/StableLLAVA) | [StableLLaVA: Enhanced Visual Instruction Tuning with Synthesized Image-Dialogue Data](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/StableLLaVA-%20Enhanced%20Visual%20Instruction%20Tuning%20with%20Synthesized%20Image-Dialogue%20Data.pdf) |
| [I4](https://github.com/DCDmllm/Cheetah/tree/main/I4%20Benchmark) | [Cheetor](https://github.com/DCDmllm/Cheetah) | [EMPOWERING VISION-LANGUAGE MODELS TO FOLLOW INTERLEAVED VISION-LANGUAGE INSTRUCTIONS](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/EMPOWERING%20VISION-LANGUAGE%20MODELS%20TO%20FOLLOW%20INTERLEAVED%20VISION-LANGUAGE%20INSTRUCTIONS.pdf) |
| [AS-1B](https://huggingface.co/spaces/OpenGVLab/all-seeing) | [ASM](https://github.com/OpenGVLab/All-Seeing) | [The All-Seeing Project: Towards Panoptic Visual Recognition and Understanding of the Open World](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/The%20All-Seeing%20Project-%20Towards%20Panoptic%20Visual%20Recognition%20and%20Understanding%20of%20the%20Open%20World.pdf) |
| [Multimodal_id_v1](https://huggingface.co/datasets/YunxinLi/Multimodal_Instruction_data_v1) | [LMEye(IPN)](https://github.com/YunxinLi/LingCloud) | [LMEye: An Interactive Perception Network for Large Language Models](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/LMEye-%20An%20Interactive%20Perception%20Network%20for%20Large%20Language%20Models.pdf) |
| [Lynx](https://github.com/bytedance/lynx-llm#prepare-data) | [Lynx](https://github.com/bytedance/lynx-llm) | [What Matters in Training a GPT4-Style Language Model with Multimodal Inputs?](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/What%20Matters%20in%20Training%20a%20GPT4-Style%20Language%20Model%20with%20Multimodal%20Inputs%3F.pdf) |
| MGVLID | ChatSpot | [ChatSpot: Bootstrapping Multimodal LLMs via Precise Referring Instruction Tuning](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/ChatSpot-%20Bootstrapping%20Multimodal%20LLMs%20via%20Precise%20Referring%20Instruction%20Tuning.pdf) |
| [BuboGPT](https://huggingface.co/datasets/magicr/BuboGPT/tree/main) | [BuboGPT](https://github.com/magic-research/bubogpt) | [BuboGPT: Enabling Visual Grounding in Multi-Modal LLMs](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/BuboGPT-%20Enabling%20Visual%20Grounding%20in%20Multi-Modal%20LLMs.pdf) |
| [GRIT-20M](https://huggingface.co/datasets/zzliang/GRIT) | [KOSMOS-2](https://github.com/microsoft/unilm/tree/master/kosmos-2) | [KOSMOS-2: Grounding Multimodal Large Language Models to the World](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/KOSMOS-2-%20Grounding%20Multimodal%20Large%20Language%20Models%20to%20the%20World.pdf) |
| [SVIT](https://huggingface.co/datasets/BAAI/SVIT) | [SVIT(MMLLM)](https://github.com/BAAI-DCAI/Visual-Instruction-Tuning) | [SVIT: Scaling up Visual Instruction Tuning](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/SVIT-%20Scaling%20up%20Visual%20Instruction%20Tuning.pdf) |
| [GPT4RoI](https://github.com/jshilong/GPT4RoI#data) | [GPT4RoI](https://github.com/jshilong/GPT4RoI) | [GPT4RoI: Instruction Tuning Large Language Model on Region-of-Interest](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/GPT4RoI-%20Instruction%20Tuning%20Large%20Language%20Model%20on%20Region-of-Interest.pdf) |
| [PF-1M](https://huggingface.co/datasets/chendelong/PF-1M) | [Clever Flamingo](https://github.com/ChenDelong1999/polite-flamingo) | [Visual Instruction Tuning with Polite Flamingo](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/Visual%20Instruction%20Tuning%20with%20Polite%20Flamingo.pdf) |
| [Shikra-RD](https://github.com/shikras/shikra/blob/main/docs/data.md) | [Shikra](https://github.com/shikras/shikra) | [Shikra: Unleashing Multimodal LLM’s Referential Dialogue Magic](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/Shikra-%20Unleashing%20Multimodal%20LLM%E2%80%99s%20Referential%20Dialogue%20Magic.pdf) |
| [LLaVAR](https://huggingface.co/datasets/SALT-NLP/LLaVAR) | [LLaVAR](https://github.com/SALT-NLP/LLaVAR) | [LLaVAR: Enhanced Visual Instruction Tuning for Text-Rich Image Understanding](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/LLaVAR-%20Enhanced%20Visual%20Instruction%20Tuning%20for%20Text-Rich%20Image%20Understanding.pdf) |
| OphGLM | [OphGLM](https://github.com/ML-AILab/OphGLM) | [OphGLM: Training an Ophthalmology Large Language-and-Vision Assistant based on Instructions and Dialogue](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/OphGLM-%20Training%20an%20Ophthalmology%20Large%20Language-and-Vision%20Assistant%20based%20on%20Instructions%20and%20Dialogue.pdf) |
| [LAMM](https://opendatalab.com/LAMM/download) | [LAMM](https://github.com/OpenGVLab/LAMM) | [LAMM: Language-Assisted Multi-Modal Instruction-Tuning Dataset, Framework, and Benchmark](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/LAMM-%20Language-Assisted%20Multi-Modal%20Instruction-Tuning%20Dataset%2C%20Framework%2C%20and%20Benchmark.pdf) |
| [MACAW-LLM](https://github.com/lyuchenyang/Macaw-LLM#usage-) | [MACAW-LLM](https://github.com/lyuchenyang/Macaw-LLM) | [Macaw-LLM: Multi-Modal Language Modeling with Image, Audio, Video, and Text Integration](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/Macaw-LLM-%20Multi-Modal%20Language%20Modeling%20with%20Image%2C%20Audio%2C%20Video%2C%20and%20Text%20Integration.pdf) |
| InstructBLIP | [InstructBLIP](https://github.com/salesforce/LAVIS/tree/main/projects/instructblip) | [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/InstructBLIP-%20Towards%20General-purpose%20Vision-Language%20Models%20with%20Instruction%20Tuning.pdf) |
| [MultiModal-GPT](https://github.com/open-mmlab/Multimodal-GPT#prepare-datasets) | [MultiModal-GPT](https://github.com/open-mmlab/Multimodal-GPT) | [MultiModal-GPT: A Vision and Language Model for Dialogue with Humans](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/MultiModal-GPT-%20A%20Vision%20and%20Language%20Model%20for%20Dialogue%20with%20Humans.pdf) |
| [Valley-Instruct-73](https://huggingface.co/datasets/luoruipu1/Valley-Instruct-73k) | [VALLEY](https://github.com/RupertLuo/Valley) | [VALLEY: VIDEO ASSISTANT WITH LARGE LANGUAGE MODEL ENHANCED ABILITY](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/VALLEY-%20VIDEO%20ASSISTANT%20WITH%20LARGE%20LANGUAGE%20MODEL%20ENHANCED%20ABILITY.pdf) |
| [Video-LLaMA](https://github.com/DAMO-NLP-SG/Video-LLaMA#data) | [Video-LLaMA](https://github.com/DAMO-NLP-SG/Video-LLaMA) | [Video-LLaMA: An Instruction-tuned Audio-Visual Language Model for Video Understanding](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/Video-LLaMA-%20An%20Instruction-tuned%20Audio-Visual%20Language%20Model%20for%20Video%20Understanding.pdf) |
| [MULTIINSTRUCT](https://github.com/VT-NLP/MultiInstruct#usage) | [OFA(multiinstruct)](https://github.com/VT-NLP/MultiInstruct) | [MULTIINSTRUCT: Improving Multi-Modal Zero-Shot Learning via Instruction Tuning](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/MULTIINSTRUCT-%20Improving%20Multi-Modal%20Zero-Shot%20Learning%20via%20Instruction%20Tuning.pdf) |
| [Video-ChatGPT](https://github.com/mbzuai-oryx/Video-ChatGPT#video-instruction-dataset-open_file_folder) | [Video-ChatGPT](https://github.com/mbzuai-oryx/Video-ChatGPT) | [Video-ChatGPT: Towards Detailed Video Understanding via Large Vision and Language Models](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/Video-ChatGPT-%20Towards%20Detailed%20Video%20Understanding%20via%20Large%20Vision%20and%20Language%20Models.pdf) |
| [MIMIC-IT](https://github.com/Luodian/Otter/blob/main/mimic-it/README.md) | [Otter](https://github.com/Luodian/Otter) | [MIMIC-IT: Multi-Modal In-Context Instruction Tuning](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/MIMIC-IT-%20Multi-Modal%20In-Context%20Instruction%20Tuning.pdf) |
| [M3IT](https://huggingface.co/datasets/MMInstruction/M3IT) | Ying-VLM | [M3IT: A Large-Scale Dataset towards Multi-Modal Multilingual Instruction Tuning](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/M3IT-%20A%20Large-Scale%20Dataset%20towards%20Multi-Modal%20Multilingual%20Instruction%20Tuning.pdf) |
| [GPT4Tools](https://github.com/AILab-CVC/GPT4Tools#dataset) | [GPT4Tools](https://github.com/AILab-CVC/GPT4Tools) | [GPT4Tools: Teaching Large Language Model to Use Tools via Self-instruction](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/GPT4Tools-%20Teaching%20Large%20Language%20Model%20to%20Use%20Tools%20via%20Self-instruction.pdf) |
| [PMC-VQA](https://huggingface.co/datasets/xmcmic/PMC-VQA) | [MedVInT-TE/TD](https://github.com/xiaoman-zhang/PMC-VQA) | [PMC-VQA: Visual Instruction Tuning for Medical Visual Question Answering](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/PMC-VQA-%20Visual%20Instruction%20Tuning%20for%20Medical%20Visual%20Question%20Answering.pdf) |
| [pandagpt_vid](https://huggingface.co/datasets/openllmplayground/pandagpt_visual_instruction_dataset) | [PandaGPT](https://github.com/yxuansu/PandaGPT) | [PandaGPT: One Model To Instruction-Follow Them All](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/PandaGPT-%20One%20Model%20To%20Instruction-Follow%20Them%20All.pdf) |
| [MULTIS](https://github.com/joez17/ChatBridge/blob/main/custom_datasets/valor_data/DATASET.md#second-stage-dataset-preparation) | [ChatBridge](https://github.com/joez17/ChatBridge) | [ChatBridge: Bridging Modalities with Large Language Model as a Language Catalyst](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/ChatBridge-%20Bridging%20Modalities%20with%20Large%20Language%20Model%20as%20a%20Language%20Catalyst.pdf) |
| [DetGPT](https://github.com/OptimalScale/DetGPT/blob/main/dataset/download.sh) | [DetGPT](https://github.com/OptimalScale/DetGPT) | [DetGPT: Detect What You Need via Reasoning](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/DetGPT-%20Detect%20What%20You%20Need%20via%20Reasoning.pdf) |
| [X-LLM](https://github.com/phellonchen/X-LLM#dataset) | [X-LLM](https://github.com/phellonchen/X-LLM) | [X-LLM: Bootstrapping Advanced Large Language Models by Treating Multi-Modalities as Foreign Languages](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/X-LLM-%20Bootstrapping%20Advanced%20Large%20Language%20Models%20by%20Treating%20Multi-Modalities%20as%20Foreign%20Languages.pdf) |
| [VideoChat](https://github.com/OpenGVLab/InternVideo/tree/main/Data/instruction_data) | [VideoChat](https://github.com/OpenGVLab/Ask-Anything) | [Videochat: Chat-centric video understanding](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/VideoChat%20-%20Chat-Centric%20Video%20Understanding.pdf) |
| [cc_sbu_align](https://huggingface.co/datasets/Vision-CAIR/cc_sbu_align) | [Mini-GPT4](https://github.com/Vision-CAIR/MiniGPT-4) | [Minigpt-4: Enhancing vision-language understanding with advanced large language models](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/Minigpt-4-%20Enhancing%20vision-language%20understanding%20with%20advanced%20large%20language%20models.pdf) |
| [LLaVA_instruct_150K](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K) | [LLaVA](https://github.com/haotian-liu/LLaVA) | [Visual Instruction Tuning](https://github.com/palchenli/VL-Instruction-Tuning/blob/main/assert/paper/Visual%20Instruction%20Tuning.pdf) |
<span id="label_vdctp"> </span>
## 📂 Multi-Modal Instruction Data Constructed in This Paper
In this paper, we propose a vision-language instruction construction pipeline and generate a corresponding dataset.
Specifically, the generated instruction data consists of multi-round question answering about a given image.
Here are some examples of the generated instruction data:
### Examples
| Image | Conversation |
| :--- | :--- |
| <img src="https://github.com/palchenli/VL-Instruction-Tuning/raw/main/assert/images/COCO_train2014_000000288586.jpg" width="200%"/> | User: Can you describe the scene in the image?<br>AI: Sure, the image captures a large grizzly bear wading through a moving river. The bear has a brown coat and appears to be quite large and furry. It's currently not holding anything in its mouth and it doesn't seem to have caught any fish from the river.<br>User: Are grizzly bears typically found in such environments?<br>AI: Yes, indeed. Grizzly bears are often found near rivers or streams. They are excellent swimmers and often go into the water in search of food. During salmon spawning seasons, they are known to frequent river areas to catch the fish.<br>User: Is the bear possibly looking for food?<br>AI: That's a strong possibility. While there are no fish present in the river in the image, grizzly bears are known to forage for various food sources in rivers, including fish and other aquatic organisms. However, without further context, it's hard to say definitively.<br>User: What other characteristics does a grizzly bear have?<br>AI: Grizzly bears, which are a subspecies of brown bear, are known for their size, strength, and excellent swimming ability. They have a distinct hump over their shoulders which is a muscle mass used for digging and clawing. Their fur color can vary from very light tan to dark brown, with the fur on their back often being darker than the rest of their body. Also, despite their large size, grizzlies are quite fast and can run at speeds of up to 30 miles per hour. |
| <img src="https://github.com/palchenli/VL-Instruction-Tuning/raw/main/assert/images/COCO_train2014_000000030722.jpg" width="200%"/> | User: What is the main animal in the picture?<br>AI: The main animal in the picture is a giraffe.<br>User: What is the giraffe doing?<br>AI: The giraffe is standing next to a tree and some rocks. It's not doing much, just hanging around.<br>User: Is the giraffe standing on the rocks?<br>AI: No, the giraffe is not standing on the rocks. It is standing next to them.<br>User: Can you describe the setting in more detail?<br>AI: The picture mainly focuses on the giraffe, a tree, and some rocks. The giraffe is standing right next to the tree. There are rocks scattered around them.<br>User: Are there any elephants in the picture?<br>AI: No, there are no elephants in this picture. Only a single giraffe is present.<br>User: Are there any unique features of the giraffe?<br>AI: As per the details provided in the image, the giraffe is standing and has a spotted pattern on its skin, which is a characteristic feature of giraffes.<br>User: Is the giraffe sitting on the rocks?<br>AI: No, the giraffe is not sitting on the rocks. It's standing by them.<br>User: Is the giraffe eating the tree leaves?<br>AI: There's no indication in the image that the giraffe is eating the leaves of the tree. It's simply standing beside it.<br>User: Where do giraffes usually live?<br>AI: Giraffes are native to Africa and they usually inhabit savannas, grasslands, and open woodlands. However, in this particular image, the exact location isn't specified. |
| <img src="https://github.com/palchenli/VL-Instruction-Tuning/raw/main/assert/images/COCO_train2014_000000247884_0.jpg" width="200%"/> | User: Is there a car in the picture?<br>AI: Yes, there is a car in the picture and it appears to be parked.<br>User: What can you tell me about the car?<br>AI: Based on the image, the car is parked, which suggests that it is not currently in use. However, I can't provide specifics about its make, model, or color without more visual information.<br>User: Do you think the car is related to the boy on the skateboard?<br>AI: It's hard to definitively state a connection between the car and the boy on the skateboard based on the image alone. The car may belong to someone who is watching the boy, or it may just be parked in the vicinity where the boy is skateboarding.<br>User: Is the car moving?<br>AI: No, the car isn't moving. It's parked.<br>User: Could the car potentially pose a danger to the skateboarder?<br>AI: Since the car is parked, it's unlikely to pose a direct threat to the skateboarder. However, any vehicle, parked or moving, in the vicinity of skateboarders could potentially become a hazard, especially if the skateboarder loses control. This emphasizes the importance of skateboarding in safe, designated areas.<br>User: Is the car possibly there for a skateboarding event?<br>AI: Based on the information available, I can't definitely say if the car is there for a skateboarding event. It could belong to a spectator or participant, or it could be unrelated to the skateboarding activity. More context would be needed to make a clear conclusion. |
There are three different types of instruction data. The data statistics and download links are as follows.
### Download Links
| Data Type | Baidu CLoud | Google Drive | Huggingface |
| :--- | :---: | :---: | :---: |
| COCO_2014_Images | [url](https://cocodataset.org/) | [url](https://cocodataset.org/) | [url](https://cocodataset.org/) |
| Global | [url](https://pan.baidu.com/s/15Ge_lwge-YOxL55_0roOfA?pwd=inok) | [url](https://drive.usercontent.google.com/download?id=1rEzH0RhWqjq8W6zXc-t8Q3Tg3ncB1dpN&export=download&authuser=0&confirm=t&uuid=f574c321-ad4c-438e-94a6-8790db70c58f&at=APZUnTVglRBUCUC6tax-d3OH33Io:1700050876759) | [url](https://huggingface.co/datasets/lllchenlll/COCO_ARC/resolve/main/global.json?download=true) |
| Negative | [url](https://pan.baidu.com/s/1wuCkm443ufpG3-xcHVrRNA?pwd=auc7) | [url](https://drive.usercontent.google.com/download?id=1sQurFP7M_Ftd2Q5NSZm41_PCMT4ECd0g&export=download&authuser=0&confirm=t&uuid=fb82922c-0fd0-4b47-a5f1-af70f4d1b300&at=APZUnTUOOoYjM2gAhK79wsUkKUFk:1700051467871) | [url](https://huggingface.co/datasets/lllchenlll/COCO_ARC/resolve/main/negative.json?download=true) |
| Region | [url](https://pan.baidu.com/s/15m1RMpeirEz83Jsxd8zC0w?pwd=96p5) | [url](https://drive.usercontent.google.com/download?id=1Qbk4cOfTcrsPx7k1rD0E20hTkdhYNfBU&export=download&authuser=0&confirm=t&uuid=6fa256d3-e085-4089-9073-11799a7b3b74&at=APZUnTXdeLntbNQeEWgpD7SvulsM:1700051759650) | [url](https://huggingface.co/datasets/lllchenlll/COCO_ARC/resolve/main/region.json?download=true) |
| Region_Images | [url](https://pan.baidu.com/s/1NpggqYSLTjcTSlohLcXKLA?pwd=mhgo) | [url](https://drive.usercontent.google.com/download?id=1FMsU3sZLXDtumrNJK6CXgOd_YMIWXKaf&export=download&authuser=0&confirm=t&uuid=4eecfe1f-9807-478b-b501-54330c3713f4&at=APZUnTUCjk0W087kFmx5TECtBHRc:1700139356648) | [url](https://huggingface.co/datasets/lllchenlll/COCO_ARC/resolve/main/region_images.zip?download=true) |
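The Hugging Face copies can also be fetched programmatically; below is a small sketch using `huggingface_hub`, with the file name taken from the table above.

```python
import json
from huggingface_hub import hf_hub_download

# Download one of the instruction files from the dataset repo.
path = hf_hub_download(
    repo_id="lllchenlll/COCO_ARC",
    filename="global.json",
    repo_type="dataset",
)
with open(path, "r", encoding="utf-8") as f:
    data = json.load(f)
```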
### Data Format
```json
{
"image_source": "",
"construction_time": "",
"annotations": [
{
"img_ids": "",
"instruction_type": "",
"conversations": []
},
{
"img_ids": "",
"instruction_type": "",
"conversations": []
}
]
}
```
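As a hedged sketch, records in this format could be iterated as follows. We assume the file holds a list of such objects; the key names follow the template above.

```python
import json

def iter_conversations(json_path):
    """Yield (img_ids, instruction_type, conversations) per annotation."""
    with open(json_path, "r", encoding="utf-8") as f:
        records = json.load(f)
    for record in records:
        # Each record describes one image source with a list of annotations.
        for ann in record["annotations"]:
            yield ann["img_ids"], ann["instruction_type"], ann["conversations"]

for img_ids, itype, turns in iter_conversations("global.json"):
    print(img_ids, itype, len(turns))
```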
## 📎 Citation
If you found this repository useful, please consider citing:
```
@article{li2023visionlanguage,
title={Vision-Language Instruction Tuning: A Review and Analysis},
author={Chen Li and Yixiao Ge and Dian Li and Ying Shan},
year={2023},
eprint={2311.08172},
archivePrefix={arXiv},
primaryClass={cs.MM}
}
```
## 👍🏻 Acknowledgement
We would like to thank [LLaVA](https://github.com/haotian-liu/LLaVA), [LAVIS](https://github.com/salesforce/LAVIS) and [OpenFlamingo](https://github.com/mlfoundations/open_flamingo) for their well-architected multi-modal LLMs.
Thanks to [SEED-Bench](https://github.com/AILab-CVC/SEED-Bench) for being an open-source and convenient benchmark for evaluating MLLMs. | lllchenlll/COCO_ARC | [
"arxiv:2311.08172",
"region:us"
]
| 2023-11-15T09:41:23+00:00 | {} | 2023-11-17T02:10:10+00:00 | [
"2311.08172"
]
| []
| TAGS
#arxiv-2311.08172 #region-us
| Vision-Language Instruction Tuning: A Review and Analysis
=========================================================
---
Chen Li1, Yixiao Ge1, Dian Li2, and Ying Shan1.
1ARC Lab, Tencent PCG
2Foundation Technology Center, Tencent PCG

<a href='URL src='URL
<a href='URL src='URL
<a href='URL src='URL
This paper is a review of all the works related to vision-language instruction tuning (VLIT). We will periodically update the recent public VLIT dataset and the VLIT data constructed by the pipeline in this paper.
---
Schedule
--------
* [ ] Release New Vision-Language Instruction Data (periodically) ...
* [ ] Update Public VLIT Datasets and Related Work (periodically) ...
* [ ] Release Construction Tools
* [x] [2023.11.16] Release Instruction Data
* [x] [2023.11.15] Paper Released (ArXiv)
️ Catalogue
-----------
1. [Existing VLIT Data](#label_evd)
2. [VLIT Data Constructed in This Paper](#label_vdctp)
️ Existing VLIT Dataset
-----------------------
Currently, the existing VLIT generation schemes can be divided into two categories, among which Annotation Adaption mainly relies on directly adjusting and rewriting the existing annotation data to adapt to the VLIT data template. Self-Instruct relies on the Large Language Model (LLM) to synthesize annotation data from more sources and reorganize it to generate VLIT data with more diversity and complexity (of course, it also brings more noise and hallucination).
### Dataset
Multi-Modal Instruction Data Constructed in This Paper
------------------------------------------------------
In this paper, we propose a vision-language instruction construction pipeline and generate a corresponding dataset.
Specifically, the generated instruction data is a multi-round question answering about a given image.
Here are some examples of the generated instruction data:
### Examples
There are three different types of instruction data. The data statistics and download links are as follows.
### Download Links
### Data Format
Citation
--------
If you found this repository useful, please consider citing:
Acknowledgement
---------------
We would like to thank LLaVA, LAVIS and OpenFlamingo for their well-architcated multi-modal LLMs.
Thanks to SEED-Bench for being an open source and convenient benchmark for evaluating MLLMs.
| [
"### Dataset\n\n\n\n \n\n\nMulti-Modal Instruction Data Constructed in This Paper\n------------------------------------------------------\n\n\nIn this paper, we propose a vision-language instruction construction pipeline and generate a corresponding dataset.\nSpecifically, the generated instruction data is a multi-round question answering about a given image.\nHere are some examples of the generated instruction data:",
"### Examples\n\n\n\nThere are three different types of instruction data. The data statistics and download links are as follows.",
"### Download Links",
"### Data Format\n\n\nCitation\n--------\n\n\nIf you found this repository useful, please consider citing:\n\n\nAcknowledgement\n---------------\n\n\nWe would like to thank LLaVA, LAVIS and OpenFlamingo for their well-architcated multi-modal LLMs.\nThanks to SEED-Bench for being an open source and convenient benchmark for evaluating MLLMs."
]
| [
"TAGS\n#arxiv-2311.08172 #region-us \n",
"### Dataset\n\n\n\n \n\n\nMulti-Modal Instruction Data Constructed in This Paper\n------------------------------------------------------\n\n\nIn this paper, we propose a vision-language instruction construction pipeline and generate a corresponding dataset.\nSpecifically, the generated instruction data is a multi-round question answering about a given image.\nHere are some examples of the generated instruction data:",
"### Examples\n\n\n\nThere are three different types of instruction data. The data statistics and download links are as follows.",
"### Download Links",
"### Data Format\n\n\nCitation\n--------\n\n\nIf you found this repository useful, please consider citing:\n\n\nAcknowledgement\n---------------\n\n\nWe would like to thank LLaVA, LAVIS and OpenFlamingo for their well-architcated multi-modal LLMs.\nThanks to SEED-Bench for being an open source and convenient benchmark for evaluating MLLMs."
]
| [
15,
79,
26,
4,
84
]
| [
"passage: TAGS\n#arxiv-2311.08172 #region-us \n### Dataset\n\n\n\n \n\n\nMulti-Modal Instruction Data Constructed in This Paper\n------------------------------------------------------\n\n\nIn this paper, we propose a vision-language instruction construction pipeline and generate a corresponding dataset.\nSpecifically, the generated instruction data is a multi-round question answering about a given image.\nHere are some examples of the generated instruction data:### Examples\n\n\n\nThere are three different types of instruction data. The data statistics and download links are as follows.### Download Links### Data Format\n\n\nCitation\n--------\n\n\nIf you found this repository useful, please consider citing:\n\n\nAcknowledgement\n---------------\n\n\nWe would like to thank LLaVA, LAVIS and OpenFlamingo for their well-architcated multi-modal LLMs.\nThanks to SEED-Bench for being an open source and convenient benchmark for evaluating MLLMs."
]
|
a2fc77267eb4338ab226dfa62e984cd22706b254 |
# Dataset Card for LegalOps JustiaCorpus
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** https://github.com/stfc/Justia-LegalOps
- **Repository:** https://github.com/stfc/Justia-LegalOps
- **Paper:**
- **Leaderboard:**
- **Point of Contact:** [email protected]
### Dataset Summary
This is an English-language dataset consisting of US Supreme and Federal Court cases, with fulltexts and summaries. The dataset comprises approximately 600 Supreme Court cases with summaries and syllabi, as well as cases with missing data, all scraped from https://supreme.justia.com/.
As the highest court in the nation, the U.S. Supreme Court has shaped the rights and freedoms of Americans since the Founding. Justia provides a free collection of all U.S. Supreme Court decisions from 1791 to the present.
The federal court data is scraped from https://law.justia.com/cases/federal/. It is sparser than the Supreme Court data: approximately 17,000 of the 284,000 scraped cases have non-zero-length fulltexts and summaries.
| Dataset Split | Number of Rows |
| --------- | --------- |
| `federal` | 284011 |
| `federal-clean` | 16818 |
| `supreme` | 1022 |
| `supreme-clean` | 593 |
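Since the `*-clean` splits keep only records with non-zero-length fulltexts and summaries, they can be approximated from the raw splits; here is a hedged sketch (field names as in the data instances below):

```python
from datasets import load_dataset

federal = load_dataset("HartreeCentre/JustiaCorpus", "federal")["train"]

# Approximate `federal-clean`: keep cases whose fulltext and
# human-written summary are both non-empty.
federal_clean = federal.filter(
    lambda ex: len(ex["fulltext"]) > 0 and len(ex["summary"]) > 0
)
print(len(federal), len(federal_clean))
```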
### Supported Tasks and Leaderboards
* Text Summarisation
### Languages
* English - (n.b. - The BCP-47 code for English as generally spoken in the United States is en-US)
## Dataset Structure
### Data Instances
The data instances differ slightly between the Federal and Supreme Court splits.
#### Supreme Court Data
Each instance in the "supreme" consists of a string fulltext field containing the Syllabus of the case (`Syllabus`), a string summary field containing a human-written summary of the case. There are two other significant data fields for the case, one containing the opinion (`Opinion`), the other (optionally) the Dissent (`Dissent`). The other fields are metadata for the case scrape, a string tag for identifiying "federal" or "supreme" court should the splits be combined (`tag`), the scraped URL (`url`), a list of a dictionary of metadata for the case itself (`metadata`), a list of file URLs (`file_urls`) and a more complete data structure (`files`) consisting of a list of dictionaries for each PDF downloaded. The "path" key in this datastructure relates to the location within the associate PDF tarball (see (# Additional Data)[# Additional Data])
```
{
"Syllabus": "See United States v. Detroit Timber & Lumber Co.,\n\n200 U.S. 321, 337.\nSUPREME COURT OF THE UNITED STATES\nSyllabus\nGREENE, aka TRICE v. FISHER, SUPERINTENDENT, STATE CORRECTIONAL INSTITUTION AT SMITHFIELD, et al.\ncertiorari to the united states court of appeals for the third circuit\nNo. 10\u2013637.\u2003Argued October 11, 2011\u2014Decided November 8, 2011\nDuring petitioner Greene\u2019s trial for murder, robbery, and conspiracy, the prosecution introduced the redacted confessions of two of Greene\u2019s nontestifying codefendants. A jury convicted Greene. The Pennsylvania Superior Court...",
"Dissent": "",
"Opinion": "SUPREME COURT OF THE UNITED STATES\n_________________\nNo. 10\u2013637\n_________________\nERIC GREENE, aka JARMAINE Q. TRICE, PETI- TIONER v. JON FISHER, SUPERINTENDENT, STATE CORRECTIONAL INSTITUTION AT SMITHFIELD, et al.\non writ of certiorari to the united states court of appeals for the third circuit\n[November 8, 2011]\n\nJustice Scalia delivered the opinion of the Court.\nUnder the Antiterrorism and Effective Death Penalty Act of 1996 (AEDPA), a federal court may not grant habeas relief to a state prisoner...,
"tag": "supreme",
"url": "https://supreme.justia.com/cases/federal/us/565/34/",
"file_urls": [
"https://supreme.justia.com/cases/federal/us/565/10-637/case.pdf"
],
"files": [
{
"checksum": "7364db9dec242c4bf751cddd1082c714",
"path": "full/aff7f0b60e06bcdc14db0962db7a187460cf3d6e.pdf",
"status": "downloaded",
"url": "https://supreme.justia.com/cases/federal/us/565/10-637/case.pdf"
}
],
"metadata": {
"Advocates": null,
"Argued": "October 11, 2011",
"Decided": "November 8, 2011",
"Docket No.": "10-637",
"First Party": "Eric Greene, aka Jarmaine Q. Trice",
"Granted": "April 4, 2011",
"Juris Postponed": null,
"Official Citation": "565 U.S. 34",
"Reargued": null,
"Second Party": "Jon Fisher, Superintendent, State Correctional Institution at Smithfield, et al.",
"page": 34,
"volume": 565
},
}
```
#### Federal Court Data
Each instance consists of a string fulltext field containing the fulltext of the case and a string summary field containing a human-written summary of the case. The remaining fields are metadata for the case scrape: a string tag identifying "federal" or "supreme" court should the splits be combined (`tag`), the scraped URL (`url`), a dictionary of metadata for the case itself (`metadata`), a list of file URLs (`file_urls`), and a more complete data structure (`files`) consisting of a list of dictionaries, one per downloaded PDF. The "path" key in this data structure gives the location within the associated PDF tarball (see [Additional Data](#additional-data)).
```json
{
"fulltext": "Appeal from judgment of the United States District Court for the Western District of New York (Telesca, J.). The district court denied Petitioner habeas corpus relief after finding that Petitioner did not derive citizenship from his father; the district court ruled that Petitioner was not in his father s legal custody when his father naturalized. We conclude that the district court erred because it relied on an unenforceable custody award. Legal custody ...",
"summary": "Petitioner appealed from the district court's denial of habeas corpus relief after finding that he did not derive citizenship from his father. The district court ruled that petitioner was not in his father's \"legal custody\" when his father naturalized. The court concluded that the district court erred because it relied on an unenforceable Dominican Republic custody award where New York had jurisdiction to determine custody. Accordingly, the court vacated the judgment and remanded for further proceedings.",
"tag": "federal",
"url": "https://law.justia.com/cases/federal/appellate-courts/ca2/09-4211/09-4211_opn-2011-12-29.html",
"file_urls": [
"https://cases.justia.com/federal/appellate-courts/ca2/09-4211/09-4211_opn-2011-12-29.pdf"
],
"files": [
{
"checksum": "c48f9dd5a186a0e4dde4259085d99840",
"path": "full/e38b7ce3ff0e4f83a100f5e2cc57552591d033b0.pdf",
"status": "downloaded",
"url": "https://cases.justia.com/federal/appellate-courts/ca2/09-4211/09-4211_opn-2011-12-29.pdf"
}
],
"metadata": {
"court_id": "ca2",
"date": "2011-12-29",
"number": "09-4211",
"title": "Garcia v. USICE (Dept. of Homeland Security), No. 09-4211 (2d Cir. 2011)"
}
}
```
### Data Fields
[More Information Needed]
### Data Splits
This dataset comprises four splits: a source-quality version (`federal`/`supreme`) and a pre-processed version (`federal-clean`/`supreme-clean`). These are **not** currently further subdivided into `train/test/eval` splits. The default split is the `supreme` split.
```
JustiaCorpus/
├── README.md
├── federal/
├── federal-clean/
├── supreme/
└── supreme-clean/
```
The splits can be loaded as follows:
```python
from datasets import load_dataset

ds = load_dataset(
"HartreeCentre/JustiaCorpus",
split,
token=HF_TOKEN_VALUE,
)["train"]
```
where `split` is one of `["supreme", "federal", "supreme-clean", "federal-clean"]`.
### Additional Data
Within this repository, two tarballs containing the full PDF documents for each dataset can be found in the corresponding `federal/` and `supreme/` directories.
* `federal/federal_pdfs.tar.gz`
* `supreme/supreme_pdfs.tar.gz`
To extract these, clone this dataset repo, navigate to the directory, and untar:
```bash
tar -xzvf federal_pdfs.tar.gz
```
The mapping between case and PDF is stored in the "files" field in the dataset.
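For example, here is a sketch that resolves a case's PDF on disk after extraction (the extraction directory name is our assumption):

```python
import os
from datasets import load_dataset

supreme = load_dataset("HartreeCentre/JustiaCorpus", "supreme")["train"]

PDF_ROOT = "supreme_pdfs"  # hypothetical directory the tarball was extracted to

case = supreme[0]
for record in case["files"]:
    # record["path"] is the location inside the tarball, e.g. "full/<sha>.pdf".
    pdf_path = os.path.join(PDF_ROOT, record["path"])
    print(record["url"], "->", pdf_path, os.path.exists(pdf_path))
```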
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Personal and Sensitive Information
This dataset is not anonymized, so individuals' names can be found in the dataset.
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
```
@INPROCEEDINGS{9378308,
author={Gargett, Andrew and Firth, Rob and Aletras, Nikolaos},
booktitle={2020 IEEE International Conference on Big Data (Big Data)},
title={LegalOps: A Summarization Corpus of Legal Opinions},
year={2020},
volume={},
number={},
pages={2117-2120},
doi={10.1109/BigData50022.2020.9378308}}
```
### Contributions
Thanks to [@RobFirth](https://github.com/RobFirth) for adding this dataset. | HartreeCentre/JustiaCorpus | [
"task_categories:summarization",
"task_categories:text2text-generation",
"task_ids:text-simplification",
"annotations_creators:expert-generated",
"language_creators:found",
"language_creators:expert-generated",
"multilinguality:monolingual",
"size_categories:10K<n<100K",
"source_datasets:original",
"language:en",
"license:cc-by-4.0",
"legal",
"region:us"
]
| 2023-11-15T09:45:36+00:00 | {"annotations_creators": ["expert-generated"], "language_creators": ["found", "expert-generated"], "language": ["en"], "license": "cc-by-4.0", "multilinguality": ["monolingual"], "size_categories": ["10K<n<100K"], "source_datasets": ["original"], "task_categories": ["summarization", "text2text-generation"], "task_ids": ["text-simplification"], "pretty_name": "LegalOps: A summarisation corpus of Federal and Supreme Court Opinions from the Justia Portal", "dataset_info": [{"config_name": "default", "splits": [{"name": "train", "num_bytes": 33980817, "num_examples": 1022}], "download_size": 17759423, "dataset_size": 33980817}, {"config_name": "federal", "features": [{"name": "fulltext", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "tag", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "file_urls", "sequence": "string"}, {"name": "files", "list": [{"name": "checksum", "dtype": "string"}, {"name": "path", "dtype": "string"}, {"name": "status", "dtype": "string"}, {"name": "url", "dtype": "string"}]}, {"name": "metadata", "struct": [{"name": "court_id", "dtype": "string"}, {"name": "date", "dtype": "string"}, {"name": "number", "dtype": "string"}, {"name": "title", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 625693535, "num_examples": 284011}], "download_size": 309803008, "dataset_size": 625693535}, {"config_name": "federal-clean", "features": [{"name": "fulltext", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "tag", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "file_urls", "sequence": "string"}, {"name": "files", "list": [{"name": "checksum", "dtype": "string"}, {"name": "path", "dtype": "string"}, {"name": "status", "dtype": "string"}, {"name": "url", "dtype": "string"}]}, {"name": "metadata", "struct": [{"name": "court_id", "dtype": "string"}, {"name": "date", "dtype": "string"}, {"name": "number", "dtype": "string"}, {"name": "title", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 585523330, "num_examples": 16745}], "download_size": 301381329, "dataset_size": 585523330}, {"config_name": "supreme", "features": [{"name": "Syllabus", "dtype": "string"}, {"name": "Dissent", "dtype": "string"}, {"name": "Opinion", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "tag", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "file_urls", "sequence": "string"}, {"name": "files", "list": [{"name": "checksum", "dtype": "string"}, {"name": "path", "dtype": "string"}, {"name": "status", "dtype": "string"}, {"name": "url", "dtype": "string"}]}, {"name": "metadata", "struct": [{"name": "Advocates", "dtype": "string"}, {"name": "Argued", "dtype": "string"}, {"name": "Decided", "dtype": "string"}, {"name": "Docket No.", "dtype": "string"}, {"name": "First Party", "dtype": "string"}, {"name": "Granted", "dtype": "string"}, {"name": "Juris Postponed", "dtype": "string"}, {"name": "Official Citation", "dtype": "string"}, {"name": "Reargued", "dtype": "string"}, {"name": "Second Party", "dtype": "string"}, {"name": "page", "dtype": "int64"}, {"name": "volume", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 33894538, "num_examples": 1022}], "download_size": 17739369, "dataset_size": 33894538}, {"config_name": "supreme-clean", "features": [{"name": "Syllabus", "dtype": "string"}, {"name": "Dissent", "dtype": "string"}, {"name": "Opinion", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "tag", 
"dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "file_urls", "sequence": "string"}, {"name": "files", "list": [{"name": "checksum", "dtype": "string"}, {"name": "path", "dtype": "string"}, {"name": "status", "dtype": "string"}, {"name": "url", "dtype": "string"}]}, {"name": "metadata", "struct": [{"name": "Advocates", "dtype": "string"}, {"name": "Argued", "dtype": "string"}, {"name": "Decided", "dtype": "string"}, {"name": "Docket No.", "dtype": "string"}, {"name": "First Party", "dtype": "string"}, {"name": "Granted", "dtype": "string"}, {"name": "Juris Postponed", "dtype": "string"}, {"name": "Official Citation", "dtype": "string"}, {"name": "Reargued", "dtype": "string"}, {"name": "Second Party", "dtype": "string"}, {"name": "page", "dtype": "int64"}, {"name": "volume", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 31766337, "num_examples": 593}], "download_size": 17053691, "dataset_size": 31766337}], "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "supreme-clean/train-*"}]}, {"config_name": "federal", "data_files": [{"split": "train", "path": "federal/train-*"}]}, {"config_name": "federal-clean", "data_files": [{"split": "train", "path": "federal-clean/train-*"}]}, {"config_name": "supreme", "data_files": [{"split": "train", "path": "supreme/train-*"}]}, {"config_name": "supreme-clean", "data_files": [{"split": "train", "path": "supreme-clean/train-*"}]}], "tags": ["legal"]} | 2023-11-20T21:36:51+00:00 | []
| [
"en"
]
| TAGS
#task_categories-summarization #task_categories-text2text-generation #task_ids-text-simplification #annotations_creators-expert-generated #language_creators-found #language_creators-expert-generated #multilinguality-monolingual #size_categories-10K<n<100K #source_datasets-original #language-English #license-cc-by-4.0 #legal #region-us
| Dataset Card for LegalOps JustiaCorpus
======================================
Table of Contents
-----------------
* Table of Contents
* Dataset Description
+ Dataset Summary
+ Supported Tasks and Leaderboards
+ Languages
* Dataset Structure
+ Data Instances
+ Data Fields
+ Data Splits
* Dataset Creation
+ Curation Rationale
+ Source Data
+ Annotations
+ Personal and Sensitive Information
* Considerations for Using the Data
+ Social Impact of Dataset
+ Discussion of Biases
+ Other Known Limitations
* Additional Information
+ Dataset Curators
+ Licensing Information
+ Citation Information
+ Contributions
Dataset Description
-------------------
* URL
* URL
* Paper:
* Leaderboard:
* URL@URL
### Dataset Summary
This is an English-language dataset consisting of US Supreme and Federal Court cases, with full texts and summaries. The dataset comprises approximately 600 Supreme Court cases with summaries and syllabi, as well as records with missing data, scraped from URL
As the highest court in the nation, the U.S. Supreme Court has shaped the rights and freedoms of Americans since the Founding. Justia provides a free collection of all U.S. Supreme Court decisions from 1791 to the present.
The federal court data is scraped from URL; it is sparser than the Supreme Court data, with approximately 17,000 cases (out of a total of 284,000 records scraped) having non-zero-length fulltext and summaries.
### Supported Tasks and Leaderboards
* Text Summarisation
### Languages
* English - (n.b. - The BCP-47 code for English as generally spoken in the United States is en-US)
Dataset Structure
-----------------
### Data Instances
The data instances for the splits differ slightly between the Federal and Supreme Court data
#### Supreme Court Data
Each instance in the "supreme" split consists of a string fulltext field containing the Syllabus of the case ('Syllabus') and a string summary field containing a human-written summary of the case. There are two other significant data fields for the case, one containing the opinion ('Opinion') and the other (optionally) the dissent ('Dissent'). The remaining fields are metadata for the case scrape: a string tag for identifying "federal" or "supreme" court should the splits be combined ('tag'), the scraped URL ('url'), a dictionary of metadata for the case itself ('metadata'), a list of file URLs ('file\_urls'), and a more complete data structure ('files') consisting of a list of dictionaries, one per PDF downloaded. The "path" key in this data structure gives the location within the associated PDF tarball (see [Additional Data](#additional-data)).
#### Federal Court Data
Each instance consists of a string fulltext field containing the fulltext of the case and a string summary field containing a human-written summary of the case. The remaining fields are metadata for the case scrape: a string tag for identifying "federal" or "supreme" court should the splits be combined ('tag'), the scraped URL ('url'), a dictionary of metadata for the case itself ('metadata'), a list of file URLs ('file\_urls'), and a more complete data structure ('files') consisting of a list of dictionaries, one per PDF downloaded. The "path" key in this data structure gives the location within the associated PDF tarball (see [Additional Data](#additional-data)).
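For orientation, a minimal access sketch (the exact repository id is not spelled out in this card, so `REPO_ID` below is a placeholder assumption):

```python
from datasets import load_dataset

REPO_ID = "<namespace>/legalops-justia"  # placeholder -- not stated in this card

ds = load_dataset(REPO_ID, "federal-clean", split="train")
example = ds[0]

print(example["summary"][:200])   # human-written summary
print(example["tag"])             # "federal" or "supreme"
print(example["metadata"])        # per-case metadata dict
for f in example["files"]:        # one dict per downloaded PDF
    print(f["path"], f["url"])    # "path" locates the PDF inside the tarball
```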
### Data Fields
### Data Splits
This dataset comprises four splits: source-quality versions ('federal'/'supreme') and pre-processed versions ('federal-clean'/'supreme-clean'). These are not currently further subdivided into 'train/test/eval' splits. The default configuration points to the 'supreme-clean' data (per the repository configuration).
The splits can be loaded as follows:
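The code block referenced here does not appear in this card; a plausible reconstruction (the repository id is again a placeholder):

```python
from datasets import load_dataset

split_name = "supreme-clean"  # any of the four names listed below

# Each named "split" is exposed as a dataset configuration whose single
# split is "train", per the repository metadata.
ds = load_dataset("<namespace>/legalops-justia", split_name, split="train")
print(ds)
```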
where 'split' is one of '["supreme", "federal", "supreme-clean", "federal-clean"]'.
### Additional Data
Within this repository, two tarballs containing the full PDF documents for each dataset can be found in the corresponding 'federal/' and 'supreme/' directories.
* 'federal/federal\_pdfs.URL'
* 'supreme/supreme\_pdfs.URL'
To extract these, clone this dataset repo, navigate to the directory, and untar:
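A Python equivalent of the untar step (the archive extension is elided in this card as "URL", so the exact file name below is an assumption):

```python
import tarfile

# Assumed archive name -- the extension is elided in this card
# ("federal/federal_pdfs.URL").
archive_path = "federal/federal_pdfs.tar.gz"

with tarfile.open(archive_path, "r:*") as tar:  # "r:*" auto-detects compression
    tar.extractall(path="federal/pdfs")

# The "path" key in each entry of the dataset's "files" field should then
# resolve relative to the extracted tree.
```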
The mapping between case and PDF is stored in the "files" field in the dataset.
Dataset Creation
----------------
### Curation Rationale
### Source Data
#### Initial Data Collection and Normalization
#### Who are the source language producers?
### Personal and Sensitive Information
This dataset is not anonymized, so individuals' names can be found in the dataset
Considerations for Using the Data
---------------------------------
### Social Impact of Dataset
### Discussion of Biases
### Other Known Limitations
Additional Information
----------------------
### Dataset Curators
### Licensing Information
### Contributions
Thanks to @RobFirth for adding this dataset.
| [
"### Dataset Summary\n\n\nThis is an English-Language dataset consisting of US Supreme and Federal Court cases, with fulltexts and summaries. The dataset comprises approximately 600 Supreme court cases with Summaries and Syllabi, as well as those with missing data scraped from URL\n\n\nAs the highest court in the nation, the U.S. Supreme Court has shaped the rights and freedoms of Americans since the Founding. Justia provides a free collection of all U.S. Supreme Court decisions from 1791 to the present.\n\n\nThe federal court data is scraped from URL it is more sparse than the Supreme Court data, with approximately 17,000 Cases out of a total of 284,000 scraped from the records with non-zero length Fulltext and Summaries.",
"### Supported Tasks and Leaderboards\n\n\n* Text Summarisation",
"### Languages\n\n\n* English - (n.b. - The BCP-47 code for English as generally spoken in the United States is en-US)\n\n\nDataset Structure\n-----------------",
"### Data Instances\n\n\nThe data instances for the splits differ slightly between the Federal and Supreme Court data",
"#### Supreme Court Data\n\n\nEach instance in the \"supreme\" consists of a string fulltext field containing the Syllabus of the case ('Syllabus'), a string summary field containing a human-written summary of the case. There are two other significant data fields for the case, one containing the opinion ('Opinion'), the other (optionally) the Dissent ('Dissent'). The other fields are metadata for the case scrape, a string tag for identifiying \"federal\" or \"supreme\" court should the splits be combined ('tag'), the scraped URL ('url'), a list of a dictionary of metadata for the case itself ('metadata'), a list of file URLs ('file\\_urls') and a more complete data structure ('files') consisting of a list of dictionaries for each PDF downloaded. The \"path\" key in this datastructure relates to the location within the associate PDF tarball (see (# Additional Data)[# Additional Data])",
"#### Federal Court Data\n\n\nEach instance consists of a string fulltext field containing the fulltext of the case, a string summary field containing a human-written summary of the case. The other fields are metadata for the case scrape, a string tag for identifiying \"federal\" or \"supreme\" court should the splits be combined ('tag'), the scraped URL ('url'), a list of a dictionary of metadata for the case itself ('metadata'), a list of file URLs ('file\\_urls') and a more complete data structure ('files') consisting of a list of dictionaries for each PDF downloaded. The \"path\" key in this datastructure relates to the location within the associate PDF tarball (see (# Additional Data)[# Additional Data])",
"### Data Fields",
"### Data Splits\n\n\nThis dataset is comprised of four splits: a source-quality version ('federal'/'supreme')and a pre-processed version ('federal-clean'/'supreme-clean'). These are not currently further subdivided into 'train/test/eval' splits. The default split is the 'supreme' split.\n\n\nThe splits can be loaded as follows:\n\n\nwhere 'split' is one of '[\"supreme\", \"federal\", \"supreme-clean\", \"federal-clean\"]'.",
"### Additional Data\n\n\nWithin this repository, two tarballs containing the full PDF documents for each dataset can be found in the corresponding 'federal/' and 'supreme/' datasets.\n\n\n* 'federal/federal\\_pdfs.URL'\n* 'supreme/supreme\\_pdfs.URL'\n\n\nto extract these, clone this dataset repo, navigate to the directory and untar:\n\n\nThe mapping between case and PDF is stored in the \"files\" field in the dataset.\n\n\nDataset Creation\n----------------",
"### Curation Rationale",
"### Source Data",
"#### Initial Data Collection and Normalization",
"#### Who are the source language producers?",
"### Personal and Sensitive Information\n\n\nThis dataset is not anonymized, so individuals' names can be found in the dataset\n\n\nConsiderations for Using the Data\n---------------------------------",
"### Social Impact of Dataset",
"### Discussion of Biases",
"### Other Known Limitations\n\n\nAdditional Information\n----------------------",
"### Dataset Curators",
"### Licensing Information",
"### Contributions\n\n\nThanks to @RobFirth for adding this dataset."
]
| [
"TAGS\n#task_categories-summarization #task_categories-text2text-generation #task_ids-text-simplification #annotations_creators-expert-generated #language_creators-found #language_creators-expert-generated #multilinguality-monolingual #size_categories-10K<n<100K #source_datasets-original #language-English #license-cc-by-4.0 #legal #region-us \n",
"### Dataset Summary\n\n\nThis is an English-Language dataset consisting of US Supreme and Federal Court cases, with fulltexts and summaries. The dataset comprises approximately 600 Supreme court cases with Summaries and Syllabi, as well as those with missing data scraped from URL\n\n\nAs the highest court in the nation, the U.S. Supreme Court has shaped the rights and freedoms of Americans since the Founding. Justia provides a free collection of all U.S. Supreme Court decisions from 1791 to the present.\n\n\nThe federal court data is scraped from URL it is more sparse than the Supreme Court data, with approximately 17,000 Cases out of a total of 284,000 scraped from the records with non-zero length Fulltext and Summaries.",
"### Supported Tasks and Leaderboards\n\n\n* Text Summarisation",
"### Languages\n\n\n* English - (n.b. - The BCP-47 code for English as generally spoken in the United States is en-US)\n\n\nDataset Structure\n-----------------",
"### Data Instances\n\n\nThe data instances for the splits differ slightly between the Federal and Supreme Court data",
"#### Supreme Court Data\n\n\nEach instance in the \"supreme\" consists of a string fulltext field containing the Syllabus of the case ('Syllabus'), a string summary field containing a human-written summary of the case. There are two other significant data fields for the case, one containing the opinion ('Opinion'), the other (optionally) the Dissent ('Dissent'). The other fields are metadata for the case scrape, a string tag for identifiying \"federal\" or \"supreme\" court should the splits be combined ('tag'), the scraped URL ('url'), a list of a dictionary of metadata for the case itself ('metadata'), a list of file URLs ('file\\_urls') and a more complete data structure ('files') consisting of a list of dictionaries for each PDF downloaded. The \"path\" key in this datastructure relates to the location within the associate PDF tarball (see (# Additional Data)[# Additional Data])",
"#### Federal Court Data\n\n\nEach instance consists of a string fulltext field containing the fulltext of the case, a string summary field containing a human-written summary of the case. The other fields are metadata for the case scrape, a string tag for identifiying \"federal\" or \"supreme\" court should the splits be combined ('tag'), the scraped URL ('url'), a list of a dictionary of metadata for the case itself ('metadata'), a list of file URLs ('file\\_urls') and a more complete data structure ('files') consisting of a list of dictionaries for each PDF downloaded. The \"path\" key in this datastructure relates to the location within the associate PDF tarball (see (# Additional Data)[# Additional Data])",
"### Data Fields",
"### Data Splits\n\n\nThis dataset is comprised of four splits: a source-quality version ('federal'/'supreme')and a pre-processed version ('federal-clean'/'supreme-clean'). These are not currently further subdivided into 'train/test/eval' splits. The default split is the 'supreme' split.\n\n\nThe splits can be loaded as follows:\n\n\nwhere 'split' is one of '[\"supreme\", \"federal\", \"supreme-clean\", \"federal-clean\"]'.",
"### Additional Data\n\n\nWithin this repository, two tarballs containing the full PDF documents for each dataset can be found in the corresponding 'federal/' and 'supreme/' datasets.\n\n\n* 'federal/federal\\_pdfs.URL'\n* 'supreme/supreme\\_pdfs.URL'\n\n\nto extract these, clone this dataset repo, navigate to the directory and untar:\n\n\nThe mapping between case and PDF is stored in the \"files\" field in the dataset.\n\n\nDataset Creation\n----------------",
"### Curation Rationale",
"### Source Data",
"#### Initial Data Collection and Normalization",
"#### Who are the source language producers?",
"### Personal and Sensitive Information\n\n\nThis dataset is not anonymized, so individuals' names can be found in the dataset\n\n\nConsiderations for Using the Data\n---------------------------------",
"### Social Impact of Dataset",
"### Discussion of Biases",
"### Other Known Limitations\n\n\nAdditional Information\n----------------------",
"### Dataset Curators",
"### Licensing Information",
"### Contributions\n\n\nThanks to @RobFirth for adding this dataset."
]
| [
115,
165,
15,
40,
23,
246,
193,
5,
139,
129,
7,
4,
10,
10,
37,
7,
8,
14,
6,
6,
18
]
| [
"passage: TAGS\n#task_categories-summarization #task_categories-text2text-generation #task_ids-text-simplification #annotations_creators-expert-generated #language_creators-found #language_creators-expert-generated #multilinguality-monolingual #size_categories-10K<n<100K #source_datasets-original #language-English #license-cc-by-4.0 #legal #region-us \n### Dataset Summary\n\n\nThis is an English-Language dataset consisting of US Supreme and Federal Court cases, with fulltexts and summaries. The dataset comprises approximately 600 Supreme court cases with Summaries and Syllabi, as well as those with missing data scraped from URL\n\n\nAs the highest court in the nation, the U.S. Supreme Court has shaped the rights and freedoms of Americans since the Founding. Justia provides a free collection of all U.S. Supreme Court decisions from 1791 to the present.\n\n\nThe federal court data is scraped from URL it is more sparse than the Supreme Court data, with approximately 17,000 Cases out of a total of 284,000 scraped from the records with non-zero length Fulltext and Summaries.### Supported Tasks and Leaderboards\n\n\n* Text Summarisation### Languages\n\n\n* English - (n.b. - The BCP-47 code for English as generally spoken in the United States is en-US)\n\n\nDataset Structure\n-----------------### Data Instances\n\n\nThe data instances for the splits differ slightly between the Federal and Supreme Court data",
"passage: #### Supreme Court Data\n\n\nEach instance in the \"supreme\" consists of a string fulltext field containing the Syllabus of the case ('Syllabus'), a string summary field containing a human-written summary of the case. There are two other significant data fields for the case, one containing the opinion ('Opinion'), the other (optionally) the Dissent ('Dissent'). The other fields are metadata for the case scrape, a string tag for identifiying \"federal\" or \"supreme\" court should the splits be combined ('tag'), the scraped URL ('url'), a list of a dictionary of metadata for the case itself ('metadata'), a list of file URLs ('file\\_urls') and a more complete data structure ('files') consisting of a list of dictionaries for each PDF downloaded. The \"path\" key in this datastructure relates to the location within the associate PDF tarball (see (# Additional Data)[# Additional Data])#### Federal Court Data\n\n\nEach instance consists of a string fulltext field containing the fulltext of the case, a string summary field containing a human-written summary of the case. The other fields are metadata for the case scrape, a string tag for identifiying \"federal\" or \"supreme\" court should the splits be combined ('tag'), the scraped URL ('url'), a list of a dictionary of metadata for the case itself ('metadata'), a list of file URLs ('file\\_urls') and a more complete data structure ('files') consisting of a list of dictionaries for each PDF downloaded. The \"path\" key in this datastructure relates to the location within the associate PDF tarball (see (# Additional Data)[# Additional Data])### Data Fields### Data Splits\n\n\nThis dataset is comprised of four splits: a source-quality version ('federal'/'supreme')and a pre-processed version ('federal-clean'/'supreme-clean'). These are not currently further subdivided into 'train/test/eval' splits. The default split is the 'supreme' split.\n\n\nThe splits can be loaded as follows:\n\n\nwhere 'split' is one of '[\"supreme\", \"federal\", \"supreme-clean\", \"federal-clean\"]'.### Additional Data\n\n\nWithin this repository, two tarballs containing the full PDF documents for each dataset can be found in the corresponding 'federal/' and 'supreme/' datasets.\n\n\n* 'federal/federal\\_pdfs.URL'\n* 'supreme/supreme\\_pdfs.URL'\n\n\nto extract these, clone this dataset repo, navigate to the directory and untar:\n\n\nThe mapping between case and PDF is stored in the \"files\" field in the dataset.\n\n\nDataset Creation\n----------------### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?"
]
|
ae299d4f357a4362baa99f97371467386f015d22 | # About
This is the *SYNTRA Experiment Dataset*. It is a sample dataset from the NovelSense SYNTRA EU Hubs 4 Data experiment (https://euhubs4data.eu/experiments/syntra/). The experiment supported the development of a web application reachable under https://syntra.app. The dataset is a synthetic traffic infrastructure dataset, e.g. for use in the validation, training, and optimization of your traffic AI models.
# Dataset description
The dataset has been created by generating 14 different visualization configurations. These include the color spectrum of cars, camera noise, background, and driving trajectories, among others. The dataset consists of png and xml files.
Each png file has a corresponding xml file which contains the annotation information in PascalVOC format.
The structure of the png and xml file names is as follows:
XXXXXX-C-M_frame_F.(png|xml)
* XXXXXX -- string encoding of configuration
* C -- number of the configuration
* M -- video in this configuration
* F -- frame number in this video
# Limitation
The dataset was generated using a development version of SYNTRA and contains only cars.
# License
SYNTRA Experiment Dataset © 2023 by NovelSense UG is licensed under CC BY-SA 4.0
(https://creativecommons.org/licenses/by-sa/4.0/) | NovelSense/syntra-experiment-dataset | [
"task_categories:object-detection",
"size_categories:1K<n<10K",
"license:cc-by-sa-4.0",
"traffic",
"vehicles",
"car",
"synthetic",
"mobility",
"infrastructure",
"doi:10.57967/hf/1350",
"region:us"
]
| 2023-11-15T09:48:35+00:00 | {"license": "cc-by-sa-4.0", "size_categories": ["1K<n<10K"], "task_categories": ["object-detection"], "pretty_name": "SYNTRA Experiment Dataset", "tags": ["traffic", "vehicles", "car", "synthetic", "mobility", "infrastructure"]} | 2023-11-15T11:34:45+00:00 | []
| []
| TAGS
#task_categories-object-detection #size_categories-1K<n<10K #license-cc-by-sa-4.0 #traffic #vehicles #car #synthetic #mobility #infrastructure #doi-10.57967/hf/1350 #region-us
| # About
This is the *SYNTRA Experiment Dataset*. It is a sample dataset from the NovelSense SYNTRA EU Hubs 4 Data experiment (URL). The experiment supported the development of a web application reachable under URL. The dataset is a synthetic traffic infrastructure dataset, e.g. for use in the validation, training, and optimization of your traffic AI models.
# Dataset description
The dataset has been created by generating 14 different visualization configurations. These include the color spectrum of cars, camera noise, background, and driving trajectories, among others. The dataset consists of png and xml files.
Each png file has a corresponding xml file which contains the annotation information in PascalVOC format.
The structure of the png and xml file names is as follows (a small parsing sketch follows the list):
XXXXXX-C-M_frame_F.(png|xml)
* XXXXXX -- string encoding of configuration
* C -- number of the configuration
* M -- video in this configuration
* F -- frame number in this video
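As an illustration, a Python sketch for decoding these file names and reading the paired PascalVOC annotation; the regex assumes the configuration string contains no hyphens, and the XML reader assumes the standard PascalVOC layout:

```python
import re
import xml.etree.ElementTree as ET

# Assumed pattern, e.g. "abcdef-3-1_frame_42.png"
NAME_RE = re.compile(
    r"(?P<cfg_str>[^-]+)-(?P<cfg>\d+)-(?P<video>\d+)_frame_(?P<frame>\d+)\.(png|xml)$"
)

def parse_name(filename: str) -> dict:
    """Split a SYNTRA file name into configuration / video / frame parts."""
    m = NAME_RE.search(filename)
    if m is None:
        raise ValueError(f"unexpected file name: {filename}")
    return {k: (int(v) if v.isdigit() else v) for k, v in m.groupdict().items()}

def read_pascal_voc(xml_path: str) -> list[dict]:
    """Read bounding boxes from a PascalVOC annotation file."""
    root = ET.parse(xml_path).getroot()
    boxes = []
    for obj in root.iter("object"):
        bb = obj.find("bndbox")
        boxes.append({
            "label": obj.findtext("name"),            # "car" only, per this card
            "xmin": int(float(bb.findtext("xmin"))),
            "ymin": int(float(bb.findtext("ymin"))),
            "xmax": int(float(bb.findtext("xmax"))),
            "ymax": int(float(bb.findtext("ymax"))),
        })
    return boxes
```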
# Limitation
The dataset was generated using a development version of SYNTRA and contains only cars.
# License
SYNTRA Experiment Dataset © 2023 by NovelSense UG is licensed under CC BY-SA 4.0
(URL | [
"# About\nThis is the *SYNTRA Experiment Dataset*. It is a sample dataset from the NovelSense SYNTRA EU Hubs 4 Data experiment (URL The experiment supported the development of a web application reachable under URL. The dataset is a synthetic traffic infrastructure dataset e.g. for use for the validation, trainig and optimization of your traffic AI models.",
"# Datset description\nThe dataset has been created by generating 14 different visualization configurations. These include color spectrum of cars, camera noise, background, driving trajectories, among others. The dataset consists of png and xml files.\n\nEach png files has a corresponding xml file which contians the annotation information in PascalVOC format.\n\nThe structure of the png and xml file names is a follows:\nXXXXXX-C-M_frame_F.(png|xml)\n* XXXXXX -- string encoding of configuration\n* C -- number of the configuration\n* M -- video in this configuration\n* F -- frame number in this video",
"# Limitation\nThe dataset was generated using a development version of SYNTRA and contains only cars.",
"# License\nSYNTRA Experiment Dataset © 2023 by NovelSense UG is licensed under CC BY-SA 4.0 \n(URL"
]
| [
"TAGS\n#task_categories-object-detection #size_categories-1K<n<10K #license-cc-by-sa-4.0 #traffic #vehicles #car #synthetic #mobility #infrastructure #doi-10.57967/hf/1350 #region-us \n",
"# About\nThis is the *SYNTRA Experiment Dataset*. It is a sample dataset from the NovelSense SYNTRA EU Hubs 4 Data experiment (URL The experiment supported the development of a web application reachable under URL. The dataset is a synthetic traffic infrastructure dataset e.g. for use for the validation, trainig and optimization of your traffic AI models.",
"# Datset description\nThe dataset has been created by generating 14 different visualization configurations. These include color spectrum of cars, camera noise, background, driving trajectories, among others. The dataset consists of png and xml files.\n\nEach png files has a corresponding xml file which contians the annotation information in PascalVOC format.\n\nThe structure of the png and xml file names is a follows:\nXXXXXX-C-M_frame_F.(png|xml)\n* XXXXXX -- string encoding of configuration\n* C -- number of the configuration\n* M -- video in this configuration\n* F -- frame number in this video",
"# Limitation\nThe dataset was generated using a development version of SYNTRA and contains only cars.",
"# License\nSYNTRA Experiment Dataset © 2023 by NovelSense UG is licensed under CC BY-SA 4.0 \n(URL"
]
| [
72,
85,
143,
24,
27
]
| [
"passage: TAGS\n#task_categories-object-detection #size_categories-1K<n<10K #license-cc-by-sa-4.0 #traffic #vehicles #car #synthetic #mobility #infrastructure #doi-10.57967/hf/1350 #region-us \n# About\nThis is the *SYNTRA Experiment Dataset*. It is a sample dataset from the NovelSense SYNTRA EU Hubs 4 Data experiment (URL The experiment supported the development of a web application reachable under URL. The dataset is a synthetic traffic infrastructure dataset e.g. for use for the validation, trainig and optimization of your traffic AI models.# Datset description\nThe dataset has been created by generating 14 different visualization configurations. These include color spectrum of cars, camera noise, background, driving trajectories, among others. The dataset consists of png and xml files.\n\nEach png files has a corresponding xml file which contians the annotation information in PascalVOC format.\n\nThe structure of the png and xml file names is a follows:\nXXXXXX-C-M_frame_F.(png|xml)\n* XXXXXX -- string encoding of configuration\n* C -- number of the configuration\n* M -- video in this configuration\n* F -- frame number in this video# Limitation\nThe dataset was generated using a development version of SYNTRA and contains only cars.# License\nSYNTRA Experiment Dataset © 2023 by NovelSense UG is licensed under CC BY-SA 4.0 \n(URL"
]
|
ddaec7a942ac8a618d1835165308568c39695ba4 | # Dataset Card for "analyst_report_raw"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | makedelta/analyst_report_raw | [
"region:us"
]
| 2023-11-15T09:54:28+00:00 | {"dataset_info": {"features": [{"name": "content", "dtype": "string"}, {"name": "created_at", "dtype": "string"}, {"name": "pdf_content", "dtype": "string"}, {"name": "sentiment", "dtype": "float64"}, {"name": "summary1", "dtype": "string"}, {"name": "summary2", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "updated_at", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 151542728, "num_examples": 5847}], "download_size": 77730208, "dataset_size": 151542728}} | 2023-11-15T09:54:42+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "analyst_report_raw"
More Information needed | [
"# Dataset Card for \"analyst_report_raw\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"analyst_report_raw\"\n\nMore Information needed"
]
| [
6,
16
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"analyst_report_raw\"\n\nMore Information needed"
]
|
279868a1cf863b6da1e590f66cf5f08963da6630 | Game compositions created by users | hubei-hunan/games | [
"license:mit",
"region:us"
]
| 2023-11-15T10:08:48+00:00 | {"license": "mit"} | 2024-02-04T00:08:14+00:00 | []
| []
| TAGS
#license-mit #region-us
| Game compositions created by users | []
| [
"TAGS\n#license-mit #region-us \n"
]
| [
11
]
| [
"passage: TAGS\n#license-mit #region-us \n"
]
|
08416109936a25f921dd8f60edf56b2cd90de41d | Subset of the Amsterdam Urban Sounds Dataset.
The dataset was created by Sensemakers and Sensing Clues in collaboration with Waag.
Data was collected in 2018 and 2019 in Amsterdam.
For each of the 9 classes there are approximately 25 samples in .wav format.
| MichielBontenbal/UrbanSounds | [
"license:openrail",
"region:us"
]
| 2023-11-15T10:26:56+00:00 | {"license": "openrail"} | 2023-11-15T10:56:19+00:00 | []
| []
| TAGS
#license-openrail #region-us
| Subset of the Amsterdam Urban Sounds Dataset.
The dataset was created by Sensemakers and Sensing Clues in collaboration with Waag.
Data was collected in 2018 and 2019 in Amsterdam.
For each of the 9 classes there are approximately 25 samples in .wav format.
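A minimal sketch for inspecting one of the .wav samples with the Python standard library (the folder layout is not documented in this card, so the path below is a placeholder):

```python
import wave

# Placeholder path -- the card does not document the directory layout.
path = "urban_sounds/<class_name>/<sample>.wav"

with wave.open(path, "rb") as wf:
    print("channels:", wf.getnchannels())
    print("sample rate:", wf.getframerate())
    print("frames:", wf.getnframes())
    pcm_bytes = wf.readframes(wf.getnframes())  # raw PCM audio
```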
| []
| [
"TAGS\n#license-openrail #region-us \n"
]
| [
12
]
| [
"passage: TAGS\n#license-openrail #region-us \n"
]
|
7cbbe44cbf031e58520be822898a9fe01d31ad5d | # Dataset Card for "vsum_advtest"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | Xapien/vsum_advtest | [
"region:us"
]
| 2023-11-15T10:33:34+00:00 | {"dataset_info": {"features": [{"name": "entity_description_A", "dtype": "string"}, {"name": "entity_description_B", "dtype": "string"}, {"name": "entity_A", "dtype": "string"}, {"name": "entity_B", "dtype": "string"}, {"name": "is_same_entity", "dtype": "bool"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 166671, "num_examples": 452}, {"name": "test", "num_bytes": 41063, "num_examples": 114}], "download_size": 36596, "dataset_size": 207734}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]} | 2023-11-15T10:33:44+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "vsum_advtest"
More Information needed | [
"# Dataset Card for \"vsum_advtest\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"vsum_advtest\"\n\nMore Information needed"
]
| [
6,
16
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"vsum_advtest\"\n\nMore Information needed"
]
|
9618e31f8836cf84db9a33a4710445035fc6142d |
# Dataset Card for Evaluation run of itsliupeng/llama2_7b_zh
## Dataset Description
- **Homepage:**
- **Repository:** https://huggingface.co/itsliupeng/llama2_7b_zh
- **Paper:**
- **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard
- **Point of Contact:** [email protected]
### Dataset Summary
Dataset automatically created during the evaluation run of model [itsliupeng/llama2_7b_zh](https://huggingface.co/itsliupeng/llama2_7b_zh) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).
The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks.
The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.
An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).
To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset("open-llm-leaderboard/details_itsliupeng__llama2_7b_zh_public",
"harness_winogrande_5",
split="train")
```
## Latest results
These are the [latest results from run 2023-11-15T10:51:37.128756](https://huggingface.co/datasets/open-llm-leaderboard/details_itsliupeng__llama2_7b_zh_public/blob/main/results_2023-11-15T10-51-37.128756.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each one in the results and in the "latest" split for each eval):
```python
{
"all": {
"acc": 0.5969511263414031,
"acc_stderr": 0.0329865461490785,
"acc_norm": 0.6078135521201408,
"acc_norm_stderr": 0.03376504385445851,
"mc1": 0.2766217870257038,
"mc1_stderr": 0.015659605755326912,
"mc2": 0.42858587749612026,
"mc2_stderr": 0.014059235435250938,
"em": 0.18791946308724833,
"em_stderr": 0.004000599568072892,
"f1": 0.23667890100671124,
"f1_stderr": 0.003992615682814011
},
"harness|arc:challenge|25": {
"acc": 0.47952218430034127,
"acc_stderr": 0.01459913135303501,
"acc_norm": 0.5204778156996587,
"acc_norm_stderr": 0.01459913135303501
},
"harness|hellaswag|10": {
"acc": 0.5608444532961562,
"acc_stderr": 0.004952698802275648,
"acc_norm": 0.7487552280422227,
"acc_norm_stderr": 0.004328425700998689
},
"harness|hendrycksTest-abstract_algebra|5": {
"acc": 0.3,
"acc_stderr": 0.046056618647183814,
"acc_norm": 0.3,
"acc_norm_stderr": 0.046056618647183814
},
"harness|hendrycksTest-anatomy|5": {
"acc": 0.5703703703703704,
"acc_stderr": 0.042763494943765995,
"acc_norm": 0.5703703703703704,
"acc_norm_stderr": 0.042763494943765995
},
"harness|hendrycksTest-astronomy|5": {
"acc": 0.6907894736842105,
"acc_stderr": 0.03761070869867479,
"acc_norm": 0.6907894736842105,
"acc_norm_stderr": 0.03761070869867479
},
"harness|hendrycksTest-business_ethics|5": {
"acc": 0.64,
"acc_stderr": 0.04824181513244218,
"acc_norm": 0.64,
"acc_norm_stderr": 0.04824181513244218
},
"harness|hendrycksTest-clinical_knowledge|5": {
"acc": 0.6566037735849056,
"acc_stderr": 0.029224526469124792,
"acc_norm": 0.6566037735849056,
"acc_norm_stderr": 0.029224526469124792
},
"harness|hendrycksTest-college_biology|5": {
"acc": 0.7013888888888888,
"acc_stderr": 0.03827052357950756,
"acc_norm": 0.7013888888888888,
"acc_norm_stderr": 0.03827052357950756
},
"harness|hendrycksTest-college_chemistry|5": {
"acc": 0.41,
"acc_stderr": 0.049431107042371025,
"acc_norm": 0.41,
"acc_norm_stderr": 0.049431107042371025
},
"harness|hendrycksTest-college_computer_science|5": {
"acc": 0.5,
"acc_stderr": 0.050251890762960605,
"acc_norm": 0.5,
"acc_norm_stderr": 0.050251890762960605
},
"harness|hendrycksTest-college_mathematics|5": {
"acc": 0.36,
"acc_stderr": 0.048241815132442176,
"acc_norm": 0.36,
"acc_norm_stderr": 0.048241815132442176
},
"harness|hendrycksTest-college_medicine|5": {
"acc": 0.6011560693641619,
"acc_stderr": 0.0373362665538351,
"acc_norm": 0.6011560693641619,
"acc_norm_stderr": 0.0373362665538351
},
"harness|hendrycksTest-college_physics|5": {
"acc": 0.37254901960784315,
"acc_stderr": 0.04810840148082634,
"acc_norm": 0.37254901960784315,
"acc_norm_stderr": 0.04810840148082634
},
"harness|hendrycksTest-computer_security|5": {
"acc": 0.72,
"acc_stderr": 0.04512608598542128,
"acc_norm": 0.72,
"acc_norm_stderr": 0.04512608598542128
},
"harness|hendrycksTest-conceptual_physics|5": {
"acc": 0.548936170212766,
"acc_stderr": 0.03252909619613197,
"acc_norm": 0.548936170212766,
"acc_norm_stderr": 0.03252909619613197
},
"harness|hendrycksTest-econometrics|5": {
"acc": 0.34210526315789475,
"acc_stderr": 0.044629175353369355,
"acc_norm": 0.34210526315789475,
"acc_norm_stderr": 0.044629175353369355
},
"harness|hendrycksTest-electrical_engineering|5": {
"acc": 0.5724137931034483,
"acc_stderr": 0.04122737111370333,
"acc_norm": 0.5724137931034483,
"acc_norm_stderr": 0.04122737111370333
},
"harness|hendrycksTest-elementary_mathematics|5": {
"acc": 0.3888888888888889,
"acc_stderr": 0.025107425481137285,
"acc_norm": 0.3888888888888889,
"acc_norm_stderr": 0.025107425481137285
},
"harness|hendrycksTest-formal_logic|5": {
"acc": 0.36507936507936506,
"acc_stderr": 0.04306241259127153,
"acc_norm": 0.36507936507936506,
"acc_norm_stderr": 0.04306241259127153
},
"harness|hendrycksTest-global_facts|5": {
"acc": 0.39,
"acc_stderr": 0.04902071300001975,
"acc_norm": 0.39,
"acc_norm_stderr": 0.04902071300001975
},
"harness|hendrycksTest-high_school_biology|5": {
"acc": 0.7064516129032258,
"acc_stderr": 0.025906087021319295,
"acc_norm": 0.7064516129032258,
"acc_norm_stderr": 0.025906087021319295
},
"harness|hendrycksTest-high_school_chemistry|5": {
"acc": 0.49261083743842365,
"acc_stderr": 0.035176035403610084,
"acc_norm": 0.49261083743842365,
"acc_norm_stderr": 0.035176035403610084
},
"harness|hendrycksTest-high_school_computer_science|5": {
"acc": 0.62,
"acc_stderr": 0.048783173121456316,
"acc_norm": 0.62,
"acc_norm_stderr": 0.048783173121456316
},
"harness|hendrycksTest-high_school_european_history|5": {
"acc": 0.7393939393939394,
"acc_stderr": 0.034277431758165236,
"acc_norm": 0.7393939393939394,
"acc_norm_stderr": 0.034277431758165236
},
"harness|hendrycksTest-high_school_geography|5": {
"acc": 0.7727272727272727,
"acc_stderr": 0.029857515673386414,
"acc_norm": 0.7727272727272727,
"acc_norm_stderr": 0.029857515673386414
},
"harness|hendrycksTest-high_school_government_and_politics|5": {
"acc": 0.8393782383419689,
"acc_stderr": 0.02649905770139744,
"acc_norm": 0.8393782383419689,
"acc_norm_stderr": 0.02649905770139744
},
"harness|hendrycksTest-high_school_macroeconomics|5": {
"acc": 0.5871794871794872,
"acc_stderr": 0.024962683564331796,
"acc_norm": 0.5871794871794872,
"acc_norm_stderr": 0.024962683564331796
},
"harness|hendrycksTest-high_school_mathematics|5": {
"acc": 0.3111111111111111,
"acc_stderr": 0.02822644674968352,
"acc_norm": 0.3111111111111111,
"acc_norm_stderr": 0.02822644674968352
},
"harness|hendrycksTest-high_school_microeconomics|5": {
"acc": 0.6764705882352942,
"acc_stderr": 0.0303883535518868,
"acc_norm": 0.6764705882352942,
"acc_norm_stderr": 0.0303883535518868
},
"harness|hendrycksTest-high_school_physics|5": {
"acc": 0.3443708609271523,
"acc_stderr": 0.038796870240733264,
"acc_norm": 0.3443708609271523,
"acc_norm_stderr": 0.038796870240733264
},
"harness|hendrycksTest-high_school_psychology|5": {
"acc": 0.8091743119266055,
"acc_stderr": 0.01684767640009109,
"acc_norm": 0.8091743119266055,
"acc_norm_stderr": 0.01684767640009109
},
"harness|hendrycksTest-high_school_statistics|5": {
"acc": 0.5185185185185185,
"acc_stderr": 0.03407632093854051,
"acc_norm": 0.5185185185185185,
"acc_norm_stderr": 0.03407632093854051
},
"harness|hendrycksTest-high_school_us_history|5": {
"acc": 0.7303921568627451,
"acc_stderr": 0.031145570659486782,
"acc_norm": 0.7303921568627451,
"acc_norm_stderr": 0.031145570659486782
},
"harness|hendrycksTest-high_school_world_history|5": {
"acc": 0.7383966244725738,
"acc_stderr": 0.028609516716994934,
"acc_norm": 0.7383966244725738,
"acc_norm_stderr": 0.028609516716994934
},
"harness|hendrycksTest-human_aging|5": {
"acc": 0.6591928251121076,
"acc_stderr": 0.0318114974705536,
"acc_norm": 0.6591928251121076,
"acc_norm_stderr": 0.0318114974705536
},
"harness|hendrycksTest-human_sexuality|5": {
"acc": 0.6870229007633588,
"acc_stderr": 0.04066962905677697,
"acc_norm": 0.6870229007633588,
"acc_norm_stderr": 0.04066962905677697
},
"harness|hendrycksTest-international_law|5": {
"acc": 0.768595041322314,
"acc_stderr": 0.03849856098794088,
"acc_norm": 0.768595041322314,
"acc_norm_stderr": 0.03849856098794088
},
"harness|hendrycksTest-jurisprudence|5": {
"acc": 0.7685185185185185,
"acc_stderr": 0.04077494709252626,
"acc_norm": 0.7685185185185185,
"acc_norm_stderr": 0.04077494709252626
},
"harness|hendrycksTest-logical_fallacies|5": {
"acc": 0.7116564417177914,
"acc_stderr": 0.03559039531617342,
"acc_norm": 0.7116564417177914,
"acc_norm_stderr": 0.03559039531617342
},
"harness|hendrycksTest-machine_learning|5": {
"acc": 0.4017857142857143,
"acc_stderr": 0.04653333146973646,
"acc_norm": 0.4017857142857143,
"acc_norm_stderr": 0.04653333146973646
},
"harness|hendrycksTest-management|5": {
"acc": 0.7766990291262136,
"acc_stderr": 0.04123553189891431,
"acc_norm": 0.7766990291262136,
"acc_norm_stderr": 0.04123553189891431
},
"harness|hendrycksTest-marketing|5": {
"acc": 0.8461538461538461,
"acc_stderr": 0.023636873317489298,
"acc_norm": 0.8461538461538461,
"acc_norm_stderr": 0.023636873317489298
},
"harness|hendrycksTest-medical_genetics|5": {
"acc": 0.74,
"acc_stderr": 0.044084400227680794,
"acc_norm": 0.74,
"acc_norm_stderr": 0.044084400227680794
},
"harness|hendrycksTest-miscellaneous|5": {
"acc": 0.7726692209450831,
"acc_stderr": 0.014987270640946012,
"acc_norm": 0.7726692209450831,
"acc_norm_stderr": 0.014987270640946012
},
"harness|hendrycksTest-moral_disputes|5": {
"acc": 0.6820809248554913,
"acc_stderr": 0.025070713719153176,
"acc_norm": 0.6820809248554913,
"acc_norm_stderr": 0.025070713719153176
},
"harness|hendrycksTest-moral_scenarios|5": {
"acc": 0.3217877094972067,
"acc_stderr": 0.015624236160792582,
"acc_norm": 0.3217877094972067,
"acc_norm_stderr": 0.015624236160792582
},
"harness|hendrycksTest-nutrition|5": {
"acc": 0.6633986928104575,
"acc_stderr": 0.02705797462449438,
"acc_norm": 0.6633986928104575,
"acc_norm_stderr": 0.02705797462449438
},
"harness|hendrycksTest-philosophy|5": {
"acc": 0.684887459807074,
"acc_stderr": 0.026385273703464496,
"acc_norm": 0.684887459807074,
"acc_norm_stderr": 0.026385273703464496
},
"harness|hendrycksTest-prehistory|5": {
"acc": 0.654320987654321,
"acc_stderr": 0.02646248777700187,
"acc_norm": 0.654320987654321,
"acc_norm_stderr": 0.02646248777700187
},
"harness|hendrycksTest-professional_accounting|5": {
"acc": 0.4645390070921986,
"acc_stderr": 0.029752389657427047,
"acc_norm": 0.4645390070921986,
"acc_norm_stderr": 0.029752389657427047
},
"harness|hendrycksTest-professional_law|5": {
"acc": 0.455019556714472,
"acc_stderr": 0.012718456618701763,
"acc_norm": 0.455019556714472,
"acc_norm_stderr": 0.012718456618701763
},
"harness|hendrycksTest-professional_medicine|5": {
"acc": 0.6286764705882353,
"acc_stderr": 0.02934980313976587,
"acc_norm": 0.6286764705882353,
"acc_norm_stderr": 0.02934980313976587
},
"harness|hendrycksTest-professional_psychology|5": {
"acc": 0.6160130718954249,
"acc_stderr": 0.01967580813528151,
"acc_norm": 0.6160130718954249,
"acc_norm_stderr": 0.01967580813528151
},
"harness|hendrycksTest-public_relations|5": {
"acc": 0.6454545454545455,
"acc_stderr": 0.045820048415054174,
"acc_norm": 0.6454545454545455,
"acc_norm_stderr": 0.045820048415054174
},
"harness|hendrycksTest-security_studies|5": {
"acc": 0.7224489795918367,
"acc_stderr": 0.02866685779027465,
"acc_norm": 0.7224489795918367,
"acc_norm_stderr": 0.02866685779027465
},
"harness|hendrycksTest-sociology|5": {
"acc": 0.8009950248756219,
"acc_stderr": 0.028231365092758406,
"acc_norm": 0.8009950248756219,
"acc_norm_stderr": 0.028231365092758406
},
"harness|hendrycksTest-us_foreign_policy|5": {
"acc": 0.79,
"acc_stderr": 0.040936018074033256,
"acc_norm": 0.79,
"acc_norm_stderr": 0.040936018074033256
},
"harness|hendrycksTest-virology|5": {
"acc": 0.5180722891566265,
"acc_stderr": 0.03889951252827216,
"acc_norm": 0.5180722891566265,
"acc_norm_stderr": 0.03889951252827216
},
"harness|hendrycksTest-world_religions|5": {
"acc": 0.7953216374269005,
"acc_stderr": 0.030944459778533207,
"acc_norm": 0.7953216374269005,
"acc_norm_stderr": 0.030944459778533207
},
"harness|truthfulqa:mc|0": {
"mc1": 0.2766217870257038,
"mc1_stderr": 0.015659605755326912,
"mc2": 0.42858587749612026,
"mc2_stderr": 0.014059235435250938
},
"harness|winogrande|5": {
"acc": 0.7174427782162589,
"acc_stderr": 0.01265406285097139
},
"harness|drop|3": {
"em": 0.18791946308724833,
"em_stderr": 0.004000599568072892,
"f1": 0.23667890100671124,
"f1_stderr": 0.003992615682814011
},
"harness|gsm8k|5": {
"acc": 0.06444275966641395,
"acc_stderr": 0.006763391728488265
}
}
```
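If you want the raw aggregated-results file shown above rather than the per-task details, a sketch using `huggingface_hub` (the file name is taken from the link above; the JSON layout may nest the scores under a `results` key, so both cases are handled):

```python
import json
from huggingface_hub import hf_hub_download

# Download the aggregated-results JSON directly from the dataset repo.
path = hf_hub_download(
    repo_id="open-llm-leaderboard/details_itsliupeng__llama2_7b_zh_public",
    filename="results_2023-11-15T10-51-37.128756.json",
    repo_type="dataset",
)

with open(path) as f:
    data = json.load(f)

scores = data.get("results", data)  # tolerate either layout
print(scores["all"])                # averaged metrics across tasks
```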
### Supported Tasks and Leaderboards
[More Information Needed]
### Languages
[More Information Needed]
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
[More Information Needed]
### Contributions
[More Information Needed] | open-llm-leaderboard/details_itsliupeng__llama2_7b_zh | [
"region:us"
]
| 2023-11-15T10:54:30+00:00 | {"pretty_name": "Evaluation run of itsliupeng/llama2_7b_zh", "dataset_summary": "Dataset automatically created during the evaluation run of model [itsliupeng/llama2_7b_zh](https://huggingface.co/itsliupeng/llama2_7b_zh) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_itsliupeng__llama2_7b_zh_public\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-11-15T10:51:37.128756](https://huggingface.co/datasets/open-llm-leaderboard/details_itsliupeng__llama2_7b_zh_public/blob/main/results_2023-11-15T10-51-37.128756.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.5969511263414031,\n \"acc_stderr\": 0.0329865461490785,\n \"acc_norm\": 0.6078135521201408,\n \"acc_norm_stderr\": 0.03376504385445851,\n \"mc1\": 0.2766217870257038,\n \"mc1_stderr\": 0.015659605755326912,\n \"mc2\": 0.42858587749612026,\n \"mc2_stderr\": 0.014059235435250938,\n \"em\": 0.18791946308724833,\n \"em_stderr\": 0.004000599568072892,\n \"f1\": 0.23667890100671124,\n \"f1_stderr\": 0.003992615682814011\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.47952218430034127,\n \"acc_stderr\": 0.01459913135303501,\n \"acc_norm\": 0.5204778156996587,\n \"acc_norm_stderr\": 0.01459913135303501\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.5608444532961562,\n \"acc_stderr\": 0.004952698802275648,\n \"acc_norm\": 0.7487552280422227,\n \"acc_norm_stderr\": 0.004328425700998689\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.3,\n \"acc_stderr\": 0.046056618647183814,\n \"acc_norm\": 0.3,\n \"acc_norm_stderr\": 0.046056618647183814\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.5703703703703704,\n \"acc_stderr\": 0.042763494943765995,\n \"acc_norm\": 0.5703703703703704,\n \"acc_norm_stderr\": 0.042763494943765995\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.6907894736842105,\n \"acc_stderr\": 0.03761070869867479,\n \"acc_norm\": 0.6907894736842105,\n \"acc_norm_stderr\": 0.03761070869867479\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.64,\n \"acc_stderr\": 0.04824181513244218,\n \"acc_norm\": 0.64,\n \"acc_norm_stderr\": 0.04824181513244218\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.6566037735849056,\n \"acc_stderr\": 0.029224526469124792,\n \"acc_norm\": 0.6566037735849056,\n \"acc_norm_stderr\": 0.029224526469124792\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.7013888888888888,\n \"acc_stderr\": 0.03827052357950756,\n \"acc_norm\": 0.7013888888888888,\n 
\"acc_norm_stderr\": 0.03827052357950756\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 0.41,\n \"acc_stderr\": 0.049431107042371025,\n \"acc_norm\": 0.41,\n \"acc_norm_stderr\": 0.049431107042371025\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.5,\n \"acc_stderr\": 0.050251890762960605,\n \"acc_norm\": 0.5,\n \"acc_norm_stderr\": 0.050251890762960605\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.36,\n \"acc_stderr\": 0.048241815132442176,\n \"acc_norm\": 0.36,\n \"acc_norm_stderr\": 0.048241815132442176\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.6011560693641619,\n \"acc_stderr\": 0.0373362665538351,\n \"acc_norm\": 0.6011560693641619,\n \"acc_norm_stderr\": 0.0373362665538351\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.37254901960784315,\n \"acc_stderr\": 0.04810840148082634,\n \"acc_norm\": 0.37254901960784315,\n \"acc_norm_stderr\": 0.04810840148082634\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.72,\n \"acc_stderr\": 0.04512608598542128,\n \"acc_norm\": 0.72,\n \"acc_norm_stderr\": 0.04512608598542128\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.548936170212766,\n \"acc_stderr\": 0.03252909619613197,\n \"acc_norm\": 0.548936170212766,\n \"acc_norm_stderr\": 0.03252909619613197\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.34210526315789475,\n \"acc_stderr\": 0.044629175353369355,\n \"acc_norm\": 0.34210526315789475,\n \"acc_norm_stderr\": 0.044629175353369355\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.5724137931034483,\n \"acc_stderr\": 0.04122737111370333,\n \"acc_norm\": 0.5724137931034483,\n \"acc_norm_stderr\": 0.04122737111370333\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.3888888888888889,\n \"acc_stderr\": 0.025107425481137285,\n \"acc_norm\": 0.3888888888888889,\n \"acc_norm_stderr\": 0.025107425481137285\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.36507936507936506,\n \"acc_stderr\": 0.04306241259127153,\n \"acc_norm\": 0.36507936507936506,\n \"acc_norm_stderr\": 0.04306241259127153\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.39,\n \"acc_stderr\": 0.04902071300001975,\n \"acc_norm\": 0.39,\n \"acc_norm_stderr\": 0.04902071300001975\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.7064516129032258,\n \"acc_stderr\": 0.025906087021319295,\n \"acc_norm\": 0.7064516129032258,\n \"acc_norm_stderr\": 0.025906087021319295\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.49261083743842365,\n \"acc_stderr\": 0.035176035403610084,\n \"acc_norm\": 0.49261083743842365,\n \"acc_norm_stderr\": 0.035176035403610084\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.62,\n \"acc_stderr\": 0.048783173121456316,\n \"acc_norm\": 0.62,\n \"acc_norm_stderr\": 0.048783173121456316\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.7393939393939394,\n \"acc_stderr\": 0.034277431758165236,\n \"acc_norm\": 0.7393939393939394,\n \"acc_norm_stderr\": 0.034277431758165236\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.7727272727272727,\n \"acc_stderr\": 0.029857515673386414,\n \"acc_norm\": 0.7727272727272727,\n \"acc_norm_stderr\": 0.029857515673386414\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.8393782383419689,\n \"acc_stderr\": 
0.02649905770139744,\n \"acc_norm\": 0.8393782383419689,\n \"acc_norm_stderr\": 0.02649905770139744\n },\n \"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.5871794871794872,\n \"acc_stderr\": 0.024962683564331796,\n \"acc_norm\": 0.5871794871794872,\n \"acc_norm_stderr\": 0.024962683564331796\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.3111111111111111,\n \"acc_stderr\": 0.02822644674968352,\n \"acc_norm\": 0.3111111111111111,\n \"acc_norm_stderr\": 0.02822644674968352\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.6764705882352942,\n \"acc_stderr\": 0.0303883535518868,\n \"acc_norm\": 0.6764705882352942,\n \"acc_norm_stderr\": 0.0303883535518868\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.3443708609271523,\n \"acc_stderr\": 0.038796870240733264,\n \"acc_norm\": 0.3443708609271523,\n \"acc_norm_stderr\": 0.038796870240733264\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.8091743119266055,\n \"acc_stderr\": 0.01684767640009109,\n \"acc_norm\": 0.8091743119266055,\n \"acc_norm_stderr\": 0.01684767640009109\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.5185185185185185,\n \"acc_stderr\": 0.03407632093854051,\n \"acc_norm\": 0.5185185185185185,\n \"acc_norm_stderr\": 0.03407632093854051\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.7303921568627451,\n \"acc_stderr\": 0.031145570659486782,\n \"acc_norm\": 0.7303921568627451,\n \"acc_norm_stderr\": 0.031145570659486782\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.7383966244725738,\n \"acc_stderr\": 0.028609516716994934,\n \"acc_norm\": 0.7383966244725738,\n \"acc_norm_stderr\": 0.028609516716994934\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.6591928251121076,\n \"acc_stderr\": 0.0318114974705536,\n \"acc_norm\": 0.6591928251121076,\n \"acc_norm_stderr\": 0.0318114974705536\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.6870229007633588,\n \"acc_stderr\": 0.04066962905677697,\n \"acc_norm\": 0.6870229007633588,\n \"acc_norm_stderr\": 0.04066962905677697\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.768595041322314,\n \"acc_stderr\": 0.03849856098794088,\n \"acc_norm\": 0.768595041322314,\n \"acc_norm_stderr\": 0.03849856098794088\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.7685185185185185,\n \"acc_stderr\": 0.04077494709252626,\n \"acc_norm\": 0.7685185185185185,\n \"acc_norm_stderr\": 0.04077494709252626\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.7116564417177914,\n \"acc_stderr\": 0.03559039531617342,\n \"acc_norm\": 0.7116564417177914,\n \"acc_norm_stderr\": 0.03559039531617342\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.4017857142857143,\n \"acc_stderr\": 0.04653333146973646,\n \"acc_norm\": 0.4017857142857143,\n \"acc_norm_stderr\": 0.04653333146973646\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.7766990291262136,\n \"acc_stderr\": 0.04123553189891431,\n \"acc_norm\": 0.7766990291262136,\n \"acc_norm_stderr\": 0.04123553189891431\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.8461538461538461,\n \"acc_stderr\": 0.023636873317489298,\n \"acc_norm\": 0.8461538461538461,\n \"acc_norm_stderr\": 0.023636873317489298\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.74,\n \"acc_stderr\": 0.044084400227680794,\n \"acc_norm\": 0.74,\n 
\"acc_norm_stderr\": 0.044084400227680794\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.7726692209450831,\n \"acc_stderr\": 0.014987270640946012,\n \"acc_norm\": 0.7726692209450831,\n \"acc_norm_stderr\": 0.014987270640946012\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.6820809248554913,\n \"acc_stderr\": 0.025070713719153176,\n \"acc_norm\": 0.6820809248554913,\n \"acc_norm_stderr\": 0.025070713719153176\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.3217877094972067,\n \"acc_stderr\": 0.015624236160792582,\n \"acc_norm\": 0.3217877094972067,\n \"acc_norm_stderr\": 0.015624236160792582\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.6633986928104575,\n \"acc_stderr\": 0.02705797462449438,\n \"acc_norm\": 0.6633986928104575,\n \"acc_norm_stderr\": 0.02705797462449438\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.684887459807074,\n \"acc_stderr\": 0.026385273703464496,\n \"acc_norm\": 0.684887459807074,\n \"acc_norm_stderr\": 0.026385273703464496\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.654320987654321,\n \"acc_stderr\": 0.02646248777700187,\n \"acc_norm\": 0.654320987654321,\n \"acc_norm_stderr\": 0.02646248777700187\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.4645390070921986,\n \"acc_stderr\": 0.029752389657427047,\n \"acc_norm\": 0.4645390070921986,\n \"acc_norm_stderr\": 0.029752389657427047\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.455019556714472,\n \"acc_stderr\": 0.012718456618701763,\n \"acc_norm\": 0.455019556714472,\n \"acc_norm_stderr\": 0.012718456618701763\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.6286764705882353,\n \"acc_stderr\": 0.02934980313976587,\n \"acc_norm\": 0.6286764705882353,\n \"acc_norm_stderr\": 0.02934980313976587\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.6160130718954249,\n \"acc_stderr\": 0.01967580813528151,\n \"acc_norm\": 0.6160130718954249,\n \"acc_norm_stderr\": 0.01967580813528151\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.6454545454545455,\n \"acc_stderr\": 0.045820048415054174,\n \"acc_norm\": 0.6454545454545455,\n \"acc_norm_stderr\": 0.045820048415054174\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.7224489795918367,\n \"acc_stderr\": 0.02866685779027465,\n \"acc_norm\": 0.7224489795918367,\n \"acc_norm_stderr\": 0.02866685779027465\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.8009950248756219,\n \"acc_stderr\": 0.028231365092758406,\n \"acc_norm\": 0.8009950248756219,\n \"acc_norm_stderr\": 0.028231365092758406\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.79,\n \"acc_stderr\": 0.040936018074033256,\n \"acc_norm\": 0.79,\n \"acc_norm_stderr\": 0.040936018074033256\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.5180722891566265,\n \"acc_stderr\": 0.03889951252827216,\n \"acc_norm\": 0.5180722891566265,\n \"acc_norm_stderr\": 0.03889951252827216\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.7953216374269005,\n \"acc_stderr\": 0.030944459778533207,\n \"acc_norm\": 0.7953216374269005,\n \"acc_norm_stderr\": 0.030944459778533207\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.2766217870257038,\n \"mc1_stderr\": 0.015659605755326912,\n \"mc2\": 0.42858587749612026,\n \"mc2_stderr\": 0.014059235435250938\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7174427782162589,\n \"acc_stderr\": 
0.01265406285097139\n },\n \"harness|drop|3\": {\n \"em\": 0.18791946308724833,\n \"em_stderr\": 0.004000599568072892,\n \"f1\": 0.23667890100671124,\n \"f1_stderr\": 0.003992615682814011\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.06444275966641395,\n \"acc_stderr\": 0.006763391728488265\n }\n}\n```", "repo_url": "https://huggingface.co/itsliupeng/llama2_7b_zh", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|arc:challenge|25_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_drop_3", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|drop|3_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|gsm8k|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hellaswag|10_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-anatomy|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-15T10-51-37.128756.parquet", 
"**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-management|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-marketing|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-virology|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-15T10-51-37.128756.parquet", 
"**/details_harness|hendrycksTest-anatomy|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-astronomy|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-college_biology|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-college_physics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-computer_security|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-econometrics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-global_facts|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-human_aging|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-international_law|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-management|5_2023-11-15T10-51-37.128756.parquet", 
"**/details_harness|hendrycksTest-marketing|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-nutrition|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-philosophy|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-prehistory|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-professional_law|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-public_relations|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-security_studies|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-sociology|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-virology|5_2023-11-15T10-51-37.128756.parquet", "**/details_harness|hendrycksTest-world_religions|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-college_biology|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-college_biology|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2023-11-15T10-51-37.128756.parquet"]}]}, 
{"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-global_facts|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-high_school_mathematics|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-jurisprudence|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-management|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": 
["**/details_harness|hendrycksTest-prehistory|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-virology|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|hendrycksTest-world_religions|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-world_religions|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|truthfulqa:mc|0_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["**/details_harness|winogrande|5_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-11-15T10-51-37.128756.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_11_15T10_51_37.128756", "path": ["results_2023-11-15T10-51-37.128756.parquet"]}, {"split": "latest", "path": ["results_2023-11-15T10-51-37.128756.parquet"]}]}]} | 2023-11-15T10:55:15+00:00 | []
| []
| TAGS
#region-us
|
# Dataset Card for Evaluation run of itsliupeng/llama2_7b_zh
## Dataset Description
- Homepage:
- Repository: URL
- Paper:
- Leaderboard: URL
- Point of Contact: clementine@URL
### Dataset Summary
Dataset automatically created during the evaluation run of model itsliupeng/llama2_7b_zh on the Open LLM Leaderboard.
The dataset is composed of 64 configurations, each one corresponding to one of the evaluated tasks.
The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results.
An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).
To load the details from a run, you can for instance do the following:
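A minimal sketch, assuming the details repository follows the leaderboard's usual `open-llm-leaderboard/details_<org>__<model>` naming (the repo id below is an assumption; the config names are the ones listed in this card):

```python
from datasets import load_dataset

# Repo id is assumed from the leaderboard's naming convention; substitute the
# actual details repository for this model if it differs.
data = load_dataset(
    "open-llm-leaderboard/details_itsliupeng__llama2_7b_zh",
    "harness_winogrande_5",  # one of the 64 per-task configurations
    split="latest",          # or a timestamped split, e.g. "2023_11_15T10_51_37.128756"
)
print(data[0])
```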
## Latest results
These are the latest results from run 2023-11-15T10:51:37.128756 (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each one in the results and under the "latest" split for each eval):
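To read these aggregated numbers programmatically, the "results" configuration can be loaded the same way; a sketch under the same repo-naming assumption as above:

```python
from datasets import load_dataset

# The "results" config aggregates metrics across all tasks for each run.
results = load_dataset(
    "open-llm-leaderboard/details_itsliupeng__llama2_7b_zh",  # assumed repo id
    "results",
    split="latest",
)
print(results[0])  # per-task accuracy and stderr values
```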
### Supported Tasks and Leaderboards
### Languages
## Dataset Structure
### Data Instances
### Data Fields
### Data Splits
## Dataset Creation
### Curation Rationale
### Source Data
#### Initial Data Collection and Normalization
#### Who are the source language producers?
### Annotations
#### Annotation process
#### Who are the annotators?
### Personal and Sensitive Information
## Considerations for Using the Data
### Social Impact of Dataset
### Discussion of Biases
### Other Known Limitations
## Additional Information
### Dataset Curators
### Licensing Information
### Contributions
| [
"# Dataset Card for Evaluation run of itsliupeng/llama2_7b_zh",
"## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL",
"### Dataset Summary\n\nDataset automatically created during the evaluation run of model itsliupeng/llama2_7b_zh on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:",
"## Latest results\n\nThese are the latest results from run 2023-11-15T10:51:37.128756(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):",
"### Supported Tasks and Leaderboards",
"### Languages",
"## Dataset Structure",
"### Data Instances",
"### Data Fields",
"### Data Splits",
"## Dataset Creation",
"### Curation Rationale",
"### Source Data",
"#### Initial Data Collection and Normalization",
"#### Who are the source language producers?",
"### Annotations",
"#### Annotation process",
"#### Who are the annotators?",
"### Personal and Sensitive Information",
"## Considerations for Using the Data",
"### Social Impact of Dataset",
"### Discussion of Biases",
"### Other Known Limitations",
"## Additional Information",
"### Dataset Curators",
"### Licensing Information",
"### Contributions"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for Evaluation run of itsliupeng/llama2_7b_zh",
"## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL",
"### Dataset Summary\n\nDataset automatically created during the evaluation run of model itsliupeng/llama2_7b_zh on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:",
"## Latest results\n\nThese are the latest results from run 2023-11-15T10:51:37.128756(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):",
"### Supported Tasks and Leaderboards",
"### Languages",
"## Dataset Structure",
"### Data Instances",
"### Data Fields",
"### Data Splits",
"## Dataset Creation",
"### Curation Rationale",
"### Source Data",
"#### Initial Data Collection and Normalization",
"#### Who are the source language producers?",
"### Annotations",
"#### Annotation process",
"#### Who are the annotators?",
"### Personal and Sensitive Information",
"## Considerations for Using the Data",
"### Social Impact of Dataset",
"### Discussion of Biases",
"### Other Known Limitations",
"## Additional Information",
"### Dataset Curators",
"### Licensing Information",
"### Contributions"
]
| [
6,
21,
31,
170,
66,
10,
4,
6,
6,
5,
5,
5,
7,
4,
10,
10,
5,
5,
9,
8,
8,
7,
8,
7,
5,
6,
6,
5
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of itsliupeng/llama2_7b_zh## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model itsliupeng/llama2_7b_zh on the Open LLM Leaderboard.\n\nThe dataset is composed of 64 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-11-15T10:51:37.128756(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions"
]
|
c0cf509d7672fac41ea5660736d3f0e6e4d95823 | # Dataset Card for "imdb-sample"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | paduraru2009/imdb-sample | [
"region:us"
]
| 2023-11-15T11:09:42+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "label", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 40027944, "num_examples": 30000}, {"name": "validation", "num_bytes": 39047740, "num_examples": 30000}], "download_size": 50419531, "dataset_size": 79075684}} | 2023-11-15T13:39:28+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "imdb-sample"
More Information needed | [
"# Dataset Card for \"imdb-sample\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"imdb-sample\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"imdb-sample\"\n\nMore Information needed"
]
|
aae7f2c92e2603cb9306b74c7ceee8dced37e641 | # Dataset Card for "llama-prompt"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | aops02/llama-prompt | [
"region:us"
]
| 2023-11-15T11:32:00+00:00 | {"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 341658, "num_examples": 1200}], "download_size": 131023, "dataset_size": 341658}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T11:50:21+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "llama-prompt"
More Information needed | [
"# Dataset Card for \"llama-prompt\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"llama-prompt\"\n\nMore Information needed"
]
| [
6,
16
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"llama-prompt\"\n\nMore Information needed"
]
|
819205a761689f248e62e5ce261cb45453132c9a | # Dataset Card for "dogSamples"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | empbetty/dogSamples | [
"region:us"
]
| 2023-11-15T11:38:04+00:00 | {"dataset_info": {"features": [{"name": "caption", "dtype": "string"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 784103393.0, "num_examples": 25064}], "download_size": 782573601, "dataset_size": 784103393.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T11:38:43+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "dogSamples"
More Information needed | [
"# Dataset Card for \"dogSamples\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"dogSamples\"\n\nMore Information needed"
]
| [
6,
13
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"dogSamples\"\n\nMore Information needed"
]
|
5c1da51fd611d567241d57971880e28df1c39650 | # Dataset Card for "LLM_data2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | GHOFRANEE/LLM_data2 | [
"region:us"
]
| 2023-11-15T11:38:05+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 5201350, "num_examples": 2450}], "download_size": 45913, "dataset_size": 5201350}} | 2023-11-15T11:38:08+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "LLM_data2"
More Information needed | [
"# Dataset Card for \"LLM_data2\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"LLM_data2\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"LLM_data2\"\n\nMore Information needed"
]
|
814ad1783cdb7f0193dd10b74ef64a20a87a20c5 | # Dataset Card for "arxiv_articles_new_dataset_with_citation_info_and_hub_info"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | librarian-bots/arxiv_articles_new_dataset_with_citation_info_and_hub_info | [
"region:us"
]
| 2023-11-15T12:02:23+00:00 | {"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "submitter", "dtype": "string"}, {"name": "authors", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "comments", "dtype": "string"}, {"name": "journal-ref", "dtype": "string"}, {"name": "doi", "dtype": "string"}, {"name": "report-no", "dtype": "string"}, {"name": "categories", "dtype": "string"}, {"name": "license", "dtype": "string"}, {"name": "abstract", "dtype": "string"}, {"name": "versions", "list": [{"name": "version", "dtype": "string"}, {"name": "created", "dtype": "string"}]}, {"name": "update_date", "dtype": "timestamp[s]"}, {"name": "authors_parsed", "sequence": {"sequence": "string"}}, {"name": "prediction", "dtype": "string"}, {"name": "probability", "dtype": "float64"}, {"name": "citationCount", "dtype": "int64"}, {"name": "is_cited_on_hub", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 70298541, "num_examples": 42202}], "download_size": 39466135, "dataset_size": 70298541}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T12:02:27+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "arxiv_articles_new_dataset_with_citation_info_and_hub_info"
More Information needed | [
"# Dataset Card for \"arxiv_articles_new_dataset_with_citation_info_and_hub_info\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"arxiv_articles_new_dataset_with_citation_info_and_hub_info\"\n\nMore Information needed"
]
| [
6,
34
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"arxiv_articles_new_dataset_with_citation_info_and_hub_info\"\n\nMore Information needed"
]
|
71ebe59808eccbe0dbbfaaf0d9bd98e635b5c272 | # Dataset Card for "enem-2023-dia-2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | dominguesm/enem-2023-dia-2 | [
"region:us"
]
| 2023-11-15T12:02:39+00:00 | {"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "resolution", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 113896, "num_examples": 90}], "download_size": 75849, "dataset_size": 113896}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-15T12:02:41+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "enem-2023-dia-2"
More Information needed | [
"# Dataset Card for \"enem-2023-dia-2\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"enem-2023-dia-2\"\n\nMore Information needed"
]
| [
6,
17
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"enem-2023-dia-2\"\n\nMore Information needed"
]
|
95ef6dfe261c03bd107895b616b027b61904b5eb | # Dataset Card for "ultrafeedback_binarized_prefs_filtered"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | SebastianSchramm/ultrafeedback_binarized_prefs_filtered | [
"region:us"
]
| 2023-11-15T12:13:37+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train_prefs", "path": "data/train_prefs-*"}, {"split": "test_prefs", "path": "data/test_prefs-*"}]}], "dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "prompt_id", "dtype": "string"}, {"name": "chosen", "list": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}, {"name": "rejected", "list": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}, {"name": "messages", "list": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}, {"name": "score_chosen", "dtype": "float64"}, {"name": "score_rejected", "dtype": "float64"}], "splits": [{"name": "train_prefs", "num_bytes": 360858348.6922183, "num_examples": 56286}, {"name": "test_prefs", "num_bytes": 11663780.3125, "num_examples": 1825}], "download_size": 180008939, "dataset_size": 372522129.0047183}} | 2023-11-15T12:14:31+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "ultrafeedback_binarized_prefs_filtered"
More Information needed | [
"# Dataset Card for \"ultrafeedback_binarized_prefs_filtered\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"ultrafeedback_binarized_prefs_filtered\"\n\nMore Information needed"
]
| [
6,
24
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"ultrafeedback_binarized_prefs_filtered\"\n\nMore Information needed"
]
|