sha
stringlengths
40
40
text
stringlengths
1
13.4M
id
stringlengths
2
117
tags
listlengths
1
7.91k
created_at
stringlengths
25
25
metadata
stringlengths
2
875k
last_modified
stringlengths
25
25
arxiv
listlengths
0
25
languages
listlengths
0
7.91k
tags_str
stringlengths
17
159k
text_str
stringlengths
1
447k
text_lists
listlengths
0
352
processed_texts
listlengths
1
353
tokens_length
listlengths
1
353
input_texts
listlengths
1
40
5b0afc18982552025a974238cc885841f1e21363
# Dataset Card for "ImageNetSubsetValidate" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
DeepLearner101/ImageNetSubsetValidate
[ "region:us" ]
2023-11-02T11:02:31+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 4050613.0, "num_examples": 96}], "download_size": 4049988, "dataset_size": 4050613.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-02T11:41:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ImageNetSubsetValidate" More Information needed
[ "# Dataset Card for \"ImageNetSubsetValidate\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ImageNetSubsetValidate\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ImageNetSubsetValidate\"\n\nMore Information needed" ]
7ff9e752abe906ea56e1434f4877562933684292
## Dataset Details This is a dataset of disease names, their definitions and descriptions. The information is extracted from the Disease Ontology. ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Question** [More Information Needed] - **Context** [More Information Needed]
QuyenAnhDE/Concat_medical
[ "language:en", "medical", "region:us" ]
2023-11-02T11:05:16+00:00
{"language": ["en"], "tags": ["medical"]}
2023-11-02T11:12:06+00:00
[]
[ "en" ]
TAGS #language-English #medical #region-us
## Dataset Details This is a dataset of disease names, their definitions and descriptions. The information is extracted from the Disease Ontology. ### Dataset Description - Question - Context
[ "## Dataset Details\nThis is a dataset of disease names, their definitions and descriptions.\n\nThe information is extracted from the Disease Ontology.", "### Dataset Description\n\n\n\n\n- Question \n- Context" ]
[ "TAGS\n#language-English #medical #region-us \n", "## Dataset Details\nThis is a dataset of disease names, their definitions and descriptions.\n\nThe information is extracted from the Disease Ontology.", "### Dataset Description\n\n\n\n\n- Question \n- Context" ]
[ 13, 32, 10 ]
[ "passage: TAGS\n#language-English #medical #region-us \n## Dataset Details\nThis is a dataset of disease names, their definitions and descriptions.\n\nThe information is extracted from the Disease Ontology.### Dataset Description\n\n\n\n\n- Question \n- Context" ]
54535024145e6ee48a602e855980e38d8702a7eb
# Dataset Card for "CommentwordExpo_Eng-TH" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Nathamon/CommentwordExpo_Eng-TH
[ "region:us" ]
2023-11-02T11:15:37+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "sentence", "dtype": "string"}, {"name": "cleaned_sentence", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 2858226, "num_examples": 12407}], "download_size": 1570070, "dataset_size": 2858226}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-02T11:15:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for "CommentwordExpo_Eng-TH" More Information needed
[ "# Dataset Card for \"CommentwordExpo_Eng-TH\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"CommentwordExpo_Eng-TH\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"CommentwordExpo_Eng-TH\"\n\nMore Information needed" ]
dcfb09d98d49cf8eb44a257a26cd7f7cef275c9c
# Dataset Card for "agemo_json_prompts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jackhogan/agemo_json_prompts
[ "region:us" ]
2023-11-02T11:17:04+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "correct", "dtype": "string"}, {"name": "scrambled", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 5053590.0, "num_examples": 5319}, {"name": "test", "num_bytes": 561510.0, "num_examples": 591}], "download_size": 1349329, "dataset_size": 5615100.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]}
2023-11-02T16:19:45+00:00
[]
[]
TAGS #region-us
# Dataset Card for "agemo_json_prompts" More Information needed
[ "# Dataset Card for \"agemo_json_prompts\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"agemo_json_prompts\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"agemo_json_prompts\"\n\nMore Information needed" ]
46cf90fdfab4634133967ff9e046fe27db6afa4e
# Dataset Card for "dataset4sentinement_HSE3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
marcus2000/dataset4sentinement_HSE3
[ "region:us" ]
2023-11-02T11:31:58+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "sentiment", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 17304904.8, "num_examples": 13662}, {"name": "test", "num_bytes": 1922767.2, "num_examples": 1518}], "download_size": 9942465, "dataset_size": 19227672.0}}
2023-11-02T11:32:05+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dataset4sentinement_HSE3" More Information needed
[ "# Dataset Card for \"dataset4sentinement_HSE3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dataset4sentinement_HSE3\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"dataset4sentinement_HSE3\"\n\nMore Information needed" ]
447a3501a66b00d31cf621c3a228b97aa7ba9c21
# Dataset Card for "dataset4sentinement_HSE4" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
marcus2000/dataset4sentinement_HSE4
[ "region:us" ]
2023-11-02T11:48:01+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 5012570.082761489, "num_examples": 4328}, {"name": "test", "num_bytes": 557080.9172385111, "num_examples": 481}], "download_size": 2960605, "dataset_size": 5569651.0}}
2023-11-02T11:48:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dataset4sentinement_HSE4" More Information needed
[ "# Dataset Card for \"dataset4sentinement_HSE4\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dataset4sentinement_HSE4\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"dataset4sentinement_HSE4\"\n\nMore Information needed" ]
25a6d9526bd410ce71e01f5297d6053e14beca58
# Dataset Card for "CAI_DC_1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sshreyy/CAI_DC_1
[ "region:us" ]
2023-11-02T12:18:10+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "labels", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 57851094, "num_examples": 25085}, {"name": "test", "num_bytes": 9688350, "num_examples": 4203}], "download_size": 25349627, "dataset_size": 67539444}}
2023-11-02T12:18:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "CAI_DC_1" More Information needed
[ "# Dataset Card for \"CAI_DC_1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"CAI_DC_1\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"CAI_DC_1\"\n\nMore Information needed" ]
7146a639c2f45bd7aaaf3b218dc79e83aca4610d
# Dataset Card for "QoLT_apacAnd5Sentences" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
slplab/QoLT_apacAnd5Sentences
[ "region:us" ]
2023-11-02T12:36:07+00:00
{"dataset_info": {"features": [{"name": "speaker", "dtype": "string"}, {"name": "audio", "struct": [{"name": "array", "sequence": "float32"}, {"name": "path", "dtype": "string"}, {"name": "sampling_rate", "dtype": "int64"}]}, {"name": "txt", "dtype": "string"}, {"name": "severity", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 507067812, "num_examples": 2352}, {"name": "validation", "num_bytes": 62855037, "num_examples": 294}, {"name": "test", "num_bytes": 62245337, "num_examples": 294}], "download_size": 278294583, "dataset_size": 632168186}}
2023-11-02T12:36:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "QoLT_apacAnd5Sentences" More Information needed
[ "# Dataset Card for \"QoLT_apacAnd5Sentences\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"QoLT_apacAnd5Sentences\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"QoLT_apacAnd5Sentences\"\n\nMore Information needed" ]
c3e736bbe8e7646948b315603886b81ba2d1199b
# Dataset Card for "OpenOrca-zh" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
SUSTech/OpenOrca-zh
[ "region:us" ]
2023-11-02T12:46:53+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "cot_gpt4", "path": "data/cot_gpt4-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "system_prompt", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "response", "dtype": "string"}, {"name": "reponse", "dtype": "string"}], "splits": [{"name": "cot_gpt4", "num_bytes": 37063234, "num_examples": 39449}], "download_size": 19362531, "dataset_size": 37063234}}
2023-11-04T05:48:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "OpenOrca-zh" More Information needed
[ "# Dataset Card for \"OpenOrca-zh\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"OpenOrca-zh\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"OpenOrca-zh\"\n\nMore Information needed" ]
6392e93095ca7ca48848e3f0455a61a691388678
# Dataset Card for "find_first_sent_train_100_eval_10_dec" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/find_first_sent_train_100_eval_10_dec
[ "region:us" ]
2023-11-02T12:50:53+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}, {"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "validation", "num_bytes": 11337, "num_examples": 10}, {"name": "train", "num_bytes": 379104, "num_examples": 210}], "download_size": 197674, "dataset_size": 390441}}
2023-11-02T13:53:36+00:00
[]
[]
TAGS #region-us
# Dataset Card for "find_first_sent_train_100_eval_10_dec" More Information needed
[ "# Dataset Card for \"find_first_sent_train_100_eval_10_dec\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"find_first_sent_train_100_eval_10_dec\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"find_first_sent_train_100_eval_10_dec\"\n\nMore Information needed" ]
1ea15ab9af8c14f205accded75e908b078d75ab1
# Dataset Card for "OpenOrca-trans" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
SUSTech/OpenOrca-trans
[ "region:us" ]
2023-11-02T12:53:25+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "en", "dtype": "string"}, {"name": "zh", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 39391758, "num_examples": 18786}], "download_size": 17842979, "dataset_size": 39391758}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-02T12:53:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "OpenOrca-trans" More Information needed
[ "# Dataset Card for \"OpenOrca-trans\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"OpenOrca-trans\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"OpenOrca-trans\"\n\nMore Information needed" ]
5fad43bbba4a09cd0b60c72ea3384e91e51e6641
# Dataset Card for oasst_quality_with_suggestions This dataset has been created with [Argilla](https://docs.argilla.io). As shown in the sections below, this dataset can be loaded into Argilla as explained in [Load with Argilla](#load-with-argilla), or used directly with the `datasets` library in [Load with `datasets`](#load-with-datasets). ## Dataset Description - **Homepage:** https://argilla.io - **Repository:** https://github.com/argilla-io/argilla - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This dataset contains: * A dataset configuration file conforming to the Argilla dataset format named `argilla.yaml`. This configuration file will be used to configure the dataset when using the `FeedbackDataset.from_huggingface` method in Argilla. * Dataset records in a format compatible with HuggingFace `datasets`. These records will be loaded automatically when using `FeedbackDataset.from_huggingface` and can be loaded independently using the `datasets` library via `load_dataset`. * The [annotation guidelines](#annotation-guidelines) that have been used for building and curating the dataset, if they've been defined in Argilla. ### Load with Argilla To load with Argilla, you'll just need to install Argilla as `pip install argilla --upgrade` and then use the following code: ```python import argilla as rg ds = rg.FeedbackDataset.from_huggingface("nataliaElv/oasst_quality_with_suggestions") ``` ### Load with `datasets` To load this dataset with `datasets`, you'll just need to install `datasets` as `pip install datasets --upgrade` and then use the following code: ```python from datasets import load_dataset ds = load_dataset("nataliaElv/oasst_quality_with_suggestions") ``` ### Supported Tasks and Leaderboards This dataset can contain [multiple fields, questions and responses](https://docs.argilla.io/en/latest/conceptual_guides/data_model.html#feedback-dataset) so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the [Dataset Structure section](#dataset-structure). There are no leaderboards associated with this dataset. ### Languages [More Information Needed] ## Dataset Structure ### Data in Argilla The dataset is created in Argilla with: **fields**, **questions**, **suggestions**, **metadata**, and **guidelines**. The **fields** are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. | Field Name | Title | Type | Required | Markdown | | ---------- | ----- | ---- | -------- | -------- | | prompt | Prompt | text | True | False | | response | Response | text | True | False | The **questions** are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label_selection, multi_label_selection, or ranking. | Question Name | Title | Type | Required | Description | Values/Labels | | ------------- | ----- | ---- | -------- | ----------- | ------------- | | quality | Rate the quality of the response: | rating | True | N/A | [1, 2, 3, 4, 5] | | inappropriate-content | Does the response include any of the following? 
| multi_label_selection | True | N/A | ['Hate speech', 'Sexual content', 'Violent content', 'Untruthful information'] | | corrected-response | If needed, provide a correction to the response | text | False | N/A | N/A | The **suggestions** are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending "-suggestion" and "-suggestion-metadata" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with "-suggestion" and the metadata is appended with "-suggestion-metadata". **✨ NEW** The **metadata** is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the `metadata_properties` defined in the dataset configuration file in `argilla.yaml`. The **guidelines**, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the [annotation guidelines](#annotation-guidelines) section. ### Data Instances An example of a dataset instance in Argilla looks as follows: ```json { "external_id": null, "fields": { "prompt": "Can you explain contrastive learning in machine learning in simple terms for someone new to the field of ML?", "response": "Sure! Let\u0027s say you want to build a model which can distinguish between images of cats and dogs. You gather your dataset, consisting of many cat and dog pictures. Then you put them through a neural net of your choice, which produces some representation for each image, a sequence of numbers like [0.123, 0.045, 0.334, ...]. The problem is, if your model is unfamiliar with cat and dog images, these representations will be quite random. At one time a cat and a dog picture could have very similar representations (their numbers would be close to each other), while at others two cat images may be represented far apart. In simple terms, the model wouldn\u0027t be able to tell cats and dogs apart. This is where contrastive learning comes in.\n\nThe point of contrastive learning is to take pairs of samples (in this case images of cats and dogs), then train the model to \"pull\" representations of similar pairs (cat-cat or dog-dog) closer to each other and \"push\" representations of different pairs (cat-dog) apart. After doing this for a sufficient number of steps, your model will be able to produce unique, reliable representations for cats and dogs, in essence tell them apart.\n\nThis method is not limited to images, you can typically use it with any dataset that has similar and dissimilar data points." 
}, "metadata": { "hate_speech": 0.18384182587122588, "sexual_content": 0.1985823986360316, "untruthful_information": 0.22438455309200378, "violent_content": 0.15704218457210023 }, "responses": [], "suggestions": [], "vectors": {} } ``` While the same record in HuggingFace `datasets` looks as follows: ```json { "corrected-response": [], "corrected-response-suggestion": null, "corrected-response-suggestion-metadata": { "agent": null, "score": null, "type": null }, "external_id": null, "inappropriate-content": [], "inappropriate-content-suggestion": null, "inappropriate-content-suggestion-metadata": { "agent": null, "score": null, "type": null }, "metadata": "{\"hate_speech\": 0.18384182587122588, \"sexual_content\": 0.1985823986360316, \"untruthful_information\": 0.22438455309200378, \"violent_content\": 0.15704218457210023}", "prompt": "Can you explain contrastive learning in machine learning in simple terms for someone new to the field of ML?", "quality": [], "quality-suggestion": null, "quality-suggestion-metadata": { "agent": null, "score": null, "type": null }, "response": "Sure! Let\u0027s say you want to build a model which can distinguish between images of cats and dogs. You gather your dataset, consisting of many cat and dog pictures. Then you put them through a neural net of your choice, which produces some representation for each image, a sequence of numbers like [0.123, 0.045, 0.334, ...]. The problem is, if your model is unfamiliar with cat and dog images, these representations will be quite random. At one time a cat and a dog picture could have very similar representations (their numbers would be close to each other), while at others two cat images may be represented far apart. In simple terms, the model wouldn\u0027t be able to tell cats and dogs apart. This is where contrastive learning comes in.\n\nThe point of contrastive learning is to take pairs of samples (in this case images of cats and dogs), then train the model to \"pull\" representations of similar pairs (cat-cat or dog-dog) closer to each other and \"push\" representations of different pairs (cat-dog) apart. After doing this for a sufficient number of steps, your model will be able to produce unique, reliable representations for cats and dogs, in essence tell them apart.\n\nThis method is not limited to images, you can typically use it with any dataset that has similar and dissimilar data points." } ``` ### Data Fields Among the dataset fields, we differentiate between the following: * **Fields:** These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. * **prompt** is of type `text`. * **response** is of type `text`. * **Questions:** These are the questions that will be asked to the annotators. They can be of different types, such as `RatingQuestion`, `TextQuestion`, `LabelQuestion`, `MultiLabelQuestion`, and `RankingQuestion`. * **quality** is of type `rating` with the following allowed values [1, 2, 3, 4, 5]. * **inappropriate-content** is of type `multi_label_selection` with the following allowed values ['Hate speech', 'Sexual content', 'Violent content', 'Untruthful information']. * (optional) **corrected-response** is of type `text`. * **Suggestions:** As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. 
Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable. * (optional) **quality-suggestion** is of type `rating` with the following allowed values [1, 2, 3, 4, 5]. * (optional) **inappropriate-content-suggestion** is of type `multi_label_selection` with the following allowed values ['Hate speech', 'Sexual content', 'Violent content', 'Untruthful information']. * (optional) **corrected-response-suggestion** is of type `text`. Additionally, we also have two more fields that are optional and are the following: * **✨ NEW** **metadata:** This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the `metadata_properties` defined in the dataset configuration file in `argilla.yaml`. * **external_id:** This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file. ### Data Splits The dataset contains a single split, which is `train`. ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation guidelines [More Information Needed] #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
nataliaElv/oasst_quality_with_suggestions
[ "size_categories:1K<n<10K", "rlfh", "argilla", "human-feedback", "region:us" ]
2023-11-02T12:58:16+00:00
{"size_categories": "1K<n<10K", "tags": ["rlfh", "argilla", "human-feedback"]}
2023-11-02T12:58:21+00:00
[]
[]
TAGS #size_categories-1K<n<10K #rlfh #argilla #human-feedback #region-us
Dataset Card for oasst\_quality\_with\_suggestions ================================================== This dataset has been created with Argilla. As shown in the sections below, this dataset can be loaded into Argilla as explained in Load with Argilla, or used directly with the 'datasets' library in Load with 'datasets'. Dataset Description ------------------- * Homepage: URL * Repository: URL * Paper: * Leaderboard: * Point of Contact: ### Dataset Summary This dataset contains: * A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\_huggingface' method in Argilla. * Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\_huggingface' and can be loaded independently using the 'datasets' library via 'load\_dataset'. * The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla. ### Load with Argilla To load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code: ### Load with 'datasets' To load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code: ### Supported Tasks and Leaderboards This dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section. There are no leaderboards associated with this dataset. ### Languages Dataset Structure ----------------- ### Data in Argilla The dataset is created in Argilla with: fields, questions, suggestions, metadata, and guidelines. The fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. The questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\_selection, multi\_label\_selection, or ranking. The suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending "-suggestion" and "-suggestion-metadata" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with "-suggestion" and the metadata is appended with "-suggestion-metadata". NEW The metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\_properties' defined in the dataset configuration file in 'URL'. The guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section. 
### Data Instances An example of a dataset instance in Argilla looks as follows: While the same record in HuggingFace 'datasets' looks as follows: ### Data Fields Among the dataset fields, we differentiate between the following: * Fields: These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. + prompt is of type 'text'. + response is of type 'text'. * Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'. + quality is of type 'rating' with the following allowed values [1, 2, 3, 4, 5]. + inappropriate-content is of type 'multi\_label\_selection' with the following allowed values ['Hate speech', 'Sexual content', 'Violent content', 'Untruthful information']. + (optional) corrected-response is of type 'text'. * Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable. + (optional) quality-suggestion is of type 'rating' with the following allowed values [1, 2, 3, 4, 5]. + (optional) inappropriate-content-suggestion is of type 'multi\_label\_selection' with the following allowed values ['Hate speech', 'Sexual content', 'Violent content', 'Untruthful information']. + (optional) corrected-response-suggestion is of type 'text'. Additionally, we also have two more fields that are optional and are the following: * NEW metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\_properties' defined in the dataset configuration file in 'URL'. * external\_id: This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file. ### Data Splits The dataset contains a single split, which is 'train'. Dataset Creation ---------------- ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation guidelines #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information Considerations for Using the Data --------------------------------- ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations Additional Information ---------------------- ### Dataset Curators ### Licensing Information ### Contributions
[ "### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.", "### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:", "### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:", "### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.", "### Languages\n\n\nDataset Structure\n-----------------", "### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nNEW The metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section.", "### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:", "### Data Fields\n\n\nAmong the dataset fields, we differentiate between the following:\n\n\n* Fields: These are the dataset records themselves, for the moment just text fields are supported. 
These are the ones that will be used to provide responses to the questions.\n\n\n\t+ prompt is of type 'text'.\n\t+ response is of type 'text'.\n* Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'.\n\n\n\t+ quality is of type 'rating' with the following allowed values [1, 2, 3, 4, 5].\n\t+ inappropriate-content is of type 'multi\\_label\\_selection' with the following allowed values ['Hate speech', 'Sexual content', 'Violent content', 'Untruthful information'].\n\t+ (optional) corrected-response is of type 'text'.\n* Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable.\n\n\n\t+ (optional) quality-suggestion is of type 'rating' with the following allowed values [1, 2, 3, 4, 5].\n\t+ (optional) inappropriate-content-suggestion is of type 'multi\\_label\\_selection' with the following allowed values ['Hate speech', 'Sexual content', 'Violent content', 'Untruthful information'].\n\t+ (optional) corrected-response-suggestion is of type 'text'.\n\n\nAdditionally, we also have two more fields that are optional and are the following:\n\n\n* NEW metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n* external\\_id: This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file.", "### Data Splits\n\n\nThe dataset contains a single split, which is 'train'.\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation guidelines", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#size_categories-1K<n<10K #rlfh #argilla #human-feedback #region-us \n", "### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.", "### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:", "### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:", "### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.", "### Languages\n\n\nDataset Structure\n-----------------", "### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nNEW The metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. 
Find those in the annotation guidelines section.", "### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:", "### Data Fields\n\n\nAmong the dataset fields, we differentiate between the following:\n\n\n* Fields: These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\t+ prompt is of type 'text'.\n\t+ response is of type 'text'.\n* Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'.\n\n\n\t+ quality is of type 'rating' with the following allowed values [1, 2, 3, 4, 5].\n\t+ inappropriate-content is of type 'multi\\_label\\_selection' with the following allowed values ['Hate speech', 'Sexual content', 'Violent content', 'Untruthful information'].\n\t+ (optional) corrected-response is of type 'text'.\n* Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable.\n\n\n\t+ (optional) quality-suggestion is of type 'rating' with the following allowed values [1, 2, 3, 4, 5].\n\t+ (optional) inappropriate-content-suggestion is of type 'multi\\_label\\_selection' with the following allowed values ['Hate speech', 'Sexual content', 'Violent content', 'Untruthful information'].\n\t+ (optional) corrected-response-suggestion is of type 'text'.\n\n\nAdditionally, we also have two more fields that are optional and are the following:\n\n\n* NEW metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n* external\\_id: This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file.", "### Data Splits\n\n\nThe dataset contains a single split, which is 'train'.\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation guidelines", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 29, 162, 40, 53, 68, 11, 402, 40, 628, 27, 7, 4, 10, 10, 5, 5, 5, 9, 18, 7, 8, 14, 6, 6, 5 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #rlfh #argilla #human-feedback #region-us \n### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.### Languages\n\n\nDataset Structure\n-----------------", "passage: ### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nNEW The metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section.### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:" ]
15a8143389d8a5ebda01f8dc34f0bb748a4e9289
# Line Graphics (LG) dataset This is the official page for the LG dataset, as featured in our paper [Line Graphics Digitization: A Step Towards Full Automation](https://link.springer.com/chapter/10.1007/978-3-031-41734-4_27). By [Omar Moured](https://www.linkedin.com/in/omar-moured/) et al. ## Dataset Summary The dataset includes instance segmentation masks for **400 real line chart images, manually labeled into 11 categories** by professionals. These images were collected from 5 different professions to enhance diversity. In our paper, we studied two levels of segmentation: **coarse-level**, where we segmented (spines, axis-labels, legend, lines, titles), and **fine-level**, where we further segmented each category into x and y subclasses (except for legend and lines), and individually segmented each line. ## Category ID Reference ```python class_id_mapping = { "Label": 0, "Legend": 1, "Line": 2, "Spine": 3, "Title": 4, "ptitle": 5, "xlabel": 6, "xspine": 7, "xtitle": 8, "ylabel": 9, "yspine": 10, "ytitle": 11 } ``` ## Dataset structure (train, validation, test) - **image** - contains the PIL image of the chart - **image_name** - image name with PNG extension - **width** - original image width - **height** - original image height - **instances** - contains **n** number of labeled instances, each instance dictionary has {category_id, annotations}. **The annotations are in COCO format**. ## Sample Usage ```python from datasets import load_dataset # Load the dataset dataset = load_dataset("omoured/line-graphics-dataset") # Access the training split train_dataset = dataset["train"] # Print sample data print(dataset["train"][0]) ``` You can render the masks using `pycocotools` library as follows: ```python from pycocotools import mask polygon_coords = dataset['train'][0]['instances'][1]['mask'] image_width = dataset['validation'][0]['width'] image_height = dataset['validation'][0]['height'] mask_binary = mask.frPyObjects(polygon_coords, image_height, image_width) segmentation_mask = mask.decode(mask_binary) ``` ## Copyrights This dataset is published under the CC-BY 4.0 license, which allows for unrestricted usage, but it should be cited when used. ## Citation ```bibtex @inproceedings{moured2023line, title={Line Graphics Digitization: A Step Towards Full Automation}, author={Moured, Omar and Zhang, Jiaming and Roitberg, Alina and Schwarz, Thorsten and Stiefelhagen, Rainer}, booktitle={International Conference on Document Analysis and Recognition}, pages={438--453}, year={2023}, organization={Springer} } ``` ## Contact If you have any questions or need further assistance with this dataset, please feel free to contact us: - **Omar Moured**, [email protected]
omoured/line-graphics-dataset
[ "license:cc-by-4.0", "region:us" ]
2023-11-02T13:01:47+00:00
{"license": "cc-by-4.0", "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "image_name", "dtype": "string"}, {"name": "width", "dtype": "int64"}, {"name": "height", "dtype": "int64"}, {"name": "instances", "list": [{"name": "category_id", "dtype": "int64"}, {"name": "mask", "sequence": {"sequence": "float64"}}]}], "splits": [{"name": "train", "num_bytes": 8927542.0, "num_examples": 200}, {"name": "validation", "num_bytes": 4722935.0, "num_examples": 100}, {"name": "test", "num_bytes": 3984722.0, "num_examples": 100}], "download_size": 16709320, "dataset_size": 17635199.0}}
2023-11-03T09:50:24+00:00
[]
[]
TAGS #license-cc-by-4.0 #region-us
# Line Graphics (LG) dataset This is the official page for the LG dataset, as featured in our paper Line Graphics Digitization: A Step Towards Full Automation. By Omar Moured et al. ## Dataset Summary The dataset includes instance segmentation masks for 400 real line chart images, manually labeled into 11 categories by professionals. These images were collected from 5 different professions to enhance diversity. In our paper, we studied two levels of segmentation: coarse-level, where we segmented (spines, axis-labels, legend, lines, titles), and fine-level, where we further segmented each category into x and y subclasses (except for legend and lines), and individually segmented each line. ## Category ID Reference ## Dataset structure (train, validation, test) - image - contains the PIL image of the chart - image_name - image name with PNG extension - width - original image width - height - original image height - instances - contains n number of labeled instances, each instance dictionary has {category_id, annotations}. The annotations are in COCO format. ## Sample Usage You can render the masks using 'pycocotools' library as follows: ## Copyrights This dataset is published under the CC-BY 4.0 license, which allows for unrestricted usage, but it should be cited when used. ## Contact If you have any questions or need further assistance with this dataset, please feel free to contact us: - Omar Moured, URL@URL
[ "# Line Graphics (LG) dataset\n\nThis is the official page for the LG dataset, as featured in our paper Line Graphics Digitization: A Step Towards Full Automation.\n\nBy Omar Moured et al.", "## Dataset Summary\nThe dataset includes instance segmentation masks for 400 real line chart images, manually labeled into 11 categories by professionals. \nThese images were collected from 5 different professions to enhance diversity. In our paper, we studied two levels of segmentation: coarse-level,\nwhere we segmented (spines, axis-labels, legend, lines, titles), and fine-level, where we further segmented each category into x and y subclasses\n(except for legend and lines), and individually segmented each line.", "## Category ID Reference", "## Dataset structure (train, validation, test)\n- image - contains the PIL image of the chart\n- image_name - image name with PNG extension\n- width - original image width\n- height - original image height\n- instances - contains n number of labeled instances, each instance dictionary has {category_id, annotations}. The annotations are in COCO format.", "## Sample Usage\n\n\n\nYou can render the masks using 'pycocotools' library as follows:", "## Copyrights\nThis dataset is published under the CC-BY 4.0 license, which allows for unrestricted usage, but it should be cited when used.", "## Contact\n\nIf you have any questions or need further assistance with this dataset, please feel free to contact us:\n\n- Omar Moured, URL@URL" ]
[ "TAGS\n#license-cc-by-4.0 #region-us \n", "# Line Graphics (LG) dataset\n\nThis is the official page for the LG dataset, as featured in our paper Line Graphics Digitization: A Step Towards Full Automation.\n\nBy Omar Moured et al.", "## Dataset Summary\nThe dataset includes instance segmentation masks for 400 real line chart images, manually labeled into 11 categories by professionals. \nThese images were collected from 5 different professions to enhance diversity. In our paper, we studied two levels of segmentation: coarse-level,\nwhere we segmented (spines, axis-labels, legend, lines, titles), and fine-level, where we further segmented each category into x and y subclasses\n(except for legend and lines), and individually segmented each line.", "## Category ID Reference", "## Dataset structure (train, validation, test)\n- image - contains the PIL image of the chart\n- image_name - image name with PNG extension\n- width - original image width\n- height - original image height\n- instances - contains n number of labeled instances, each instance dictionary has {category_id, annotations}. The annotations are in COCO format.", "## Sample Usage\n\n\n\nYou can render the masks using 'pycocotools' library as follows:", "## Copyrights\nThis dataset is published under the CC-BY 4.0 license, which allows for unrestricted usage, but it should be cited when used.", "## Contact\n\nIf you have any questions or need further assistance with this dataset, please feel free to contact us:\n\n- Omar Moured, URL@URL" ]
[ 15, 49, 121, 4, 92, 25, 34, 31 ]
[ "passage: TAGS\n#license-cc-by-4.0 #region-us \n# Line Graphics (LG) dataset\n\nThis is the official page for the LG dataset, as featured in our paper Line Graphics Digitization: A Step Towards Full Automation.\n\nBy Omar Moured et al.## Dataset Summary\nThe dataset includes instance segmentation masks for 400 real line chart images, manually labeled into 11 categories by professionals. \nThese images were collected from 5 different professions to enhance diversity. In our paper, we studied two levels of segmentation: coarse-level,\nwhere we segmented (spines, axis-labels, legend, lines, titles), and fine-level, where we further segmented each category into x and y subclasses\n(except for legend and lines), and individually segmented each line.## Category ID Reference## Dataset structure (train, validation, test)\n- image - contains the PIL image of the chart\n- image_name - image name with PNG extension\n- width - original image width\n- height - original image height\n- instances - contains n number of labeled instances, each instance dictionary has {category_id, annotations}. The annotations are in COCO format.## Sample Usage\n\n\n\nYou can render the masks using 'pycocotools' library as follows:## Copyrights\nThis dataset is published under the CC-BY 4.0 license, which allows for unrestricted usage, but it should be cited when used.## Contact\n\nIf you have any questions or need further assistance with this dataset, please feel free to contact us:\n\n- Omar Moured, URL@URL" ]
6989972e299d53de5fc450cdd2bab5d1f7c062d4
# Dataset Card for "Topic-Prediction-Context" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Back-up/Topic-Prediction-Context
[ "region:us" ]
2023-11-02T13:20:37+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "topic", "struct": [{"name": "topic", "dtype": "string"}]}, {"name": "answers", "struct": [{"name": "answer_start", "sequence": "int64"}, {"name": "text", "sequence": "string"}]}, {"name": "instruction", "dtype": "string"}, {"name": "prompt_name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 260326, "num_examples": 101}], "download_size": 125912, "dataset_size": 260326}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-02T13:20:42+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Topic-Prediction-Context" More Information needed
[ "# Dataset Card for \"Topic-Prediction-Context\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Topic-Prediction-Context\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Topic-Prediction-Context\"\n\nMore Information needed" ]
daa85e908e360c9c19c6d67509327222a094a600
# Dataset Card for "lsc_acrosplit_topic_vector_128" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tomashs/lsc_acrosplit_topic_vector_128
[ "region:us" ]
2023-11-02T13:34:42+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "val", "path": "data/val-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "short_form", "dtype": "string"}, {"name": "long_form", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "__index_level_0__", "dtype": "int64"}, {"name": "topic_vector", "sequence": "float64"}], "splits": [{"name": "train", "num_bytes": 511689272, "num_examples": 381714}, {"name": "val", "num_bytes": 93727357, "num_examples": 69424}, {"name": "test", "num_bytes": 69547621, "num_examples": 52461}], "download_size": 209482012, "dataset_size": 674964250}}
2023-11-02T13:35:22+00:00
[]
[]
TAGS #region-us
# Dataset Card for "lsc_acrosplit_topic_vector_128" More Information needed
[ "# Dataset Card for \"lsc_acrosplit_topic_vector_128\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"lsc_acrosplit_topic_vector_128\"\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"lsc_acrosplit_topic_vector_128\"\n\nMore Information needed" ]
0a6678077791b88932527b44dd045c744de27306
# Dataset Card for "hl-codellama-chat-response" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
magnifi/hl-codellama-chat-response
[ "region:us" ]
2023-11-02T13:36:09+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "Query", "dtype": "string"}, {"name": "Result", "dtype": "string"}, {"name": "chat_response", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1321860.461185117, "num_examples": 1523}, {"name": "test", "num_bytes": 567627.5388148829, "num_examples": 654}], "download_size": 0, "dataset_size": 1889488.0}}
2023-11-02T16:45:00+00:00
[]
[]
TAGS #region-us
# Dataset Card for "hl-codellama-chat-response" More Information needed
[ "# Dataset Card for \"hl-codellama-chat-response\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"hl-codellama-chat-response\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"hl-codellama-chat-response\"\n\nMore Information needed" ]
5b77ccaed0c38bd2d5e4c2bba8b6533166233a3d
# Dataset Card for "AN4_ASR_dataset" This dataset can be used to train NEMO English Speech models. Audio files are sampled at `16kHz`. [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
SaffalPoosh/AN4_ASR_dataset
[ "region:us" ]
2023-11-02T13:55:30+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "audio_filepath", "dtype": "audio"}, {"name": "duration", "dtype": "float64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 81556453.0, "num_examples": 948}, {"name": "test", "num_bytes": 11434382.0, "num_examples": 130}], "download_size": 86823191, "dataset_size": 92990835.0}}
2023-11-02T15:08:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "AN4_ASR_dataset" This dataset can be used to train NEMO English Speech models. Audio files are sampled at '16kHz'. More Information needed
[ "# Dataset Card for \"AN4_ASR_dataset\"\n\nThis dataset can be used to train NEMO English Speech models.\n\nAudio files are sampled at '16kHz'.\n\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"AN4_ASR_dataset\"\n\nThis dataset can be used to train NEMO English Speech models.\n\nAudio files are sampled at '16kHz'.\n\n\nMore Information needed" ]
[ 6, 44 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"AN4_ASR_dataset\"\n\nThis dataset can be used to train NEMO English Speech models.\n\nAudio files are sampled at '16kHz'.\n\n\nMore Information needed" ]
725c273da709add681b7d3fdfb17c419d7be7f22
# Dataset Card for "ultrafeedback-instruction-dataset-mini" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
alvarobartt/ultrafeedback-instruction-dataset-mini
[ "region:us" ]
2023-11-02T14:13:42+00:00
{"dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "generations", "sequence": "string"}, {"name": "raw_generation_response", "sequence": "string"}, {"name": "rating", "sequence": "int64"}, {"name": "rationale", "sequence": "string"}, {"name": "raw_labelling_response", "struct": [{"name": "choices", "list": [{"name": "finish_reason", "dtype": "string"}, {"name": "index", "dtype": "int64"}, {"name": "message", "struct": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}]}, {"name": "created", "dtype": "int64"}, {"name": "id", "dtype": "string"}, {"name": "model", "dtype": "string"}, {"name": "object", "dtype": "string"}, {"name": "usage", "struct": [{"name": "completion_tokens", "dtype": "int64"}, {"name": "prompt_tokens", "dtype": "int64"}, {"name": "total_tokens", "dtype": "int64"}]}]}], "splits": [{"name": "train", "num_bytes": 19411, "num_examples": 5}, {"name": "test", "num_bytes": 18987, "num_examples": 5}], "download_size": 65415, "dataset_size": 38398}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]}
2023-11-03T11:30:49+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ultrafeedback-instruction-dataset-mini" More Information needed
[ "# Dataset Card for \"ultrafeedback-instruction-dataset-mini\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ultrafeedback-instruction-dataset-mini\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ultrafeedback-instruction-dataset-mini\"\n\nMore Information needed" ]
164d3b41852b1eebe89f1dc0e6e0042f16835ea0
# Dataset Card for "librispeech_long" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
distil-whisper/librispeech_long
[ "region:us" ]
2023-11-02T14:22:51+00:00
{"dataset_info": {"config_name": "clean", "features": [{"name": "audio", "dtype": "audio"}], "splits": [{"name": "validation", "num_bytes": 1998609.0, "num_examples": 1}], "download_size": 1984721, "dataset_size": 1998609.0}, "configs": [{"config_name": "clean", "data_files": [{"split": "validation", "path": "clean/validation-*"}]}]}
2023-11-02T14:22:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for "librispeech_long" More Information needed
[ "# Dataset Card for \"librispeech_long\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"librispeech_long\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"librispeech_long\"\n\nMore Information needed" ]
91f51b34ca947512adc0133b77ecb3241745e0b8
# Dataset Card for "noisy-cifar10-reconstructions" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
englert-m/noisy-cifar10-reconstructions
[ "region:us" ]
2023-11-02T14:26:26+00:00
{"dataset_info": {"features": [{"name": "orig", "dtype": "uint32"}, {"name": "corrupted", "dtype": "image"}, {"name": "count", "dtype": "uint32"}, {"name": "xflip", "dtype": "int64"}, {"name": "yflip", "dtype": "int64"}, {"name": "scale", "dtype": "float32"}, {"name": "rotate_frac", "dtype": "float32"}, {"name": "aniso_w", "dtype": "float32"}, {"name": "aniso_r", "dtype": "float32"}, {"name": "translate_frac", "sequence": "float64"}], "splits": [{"name": "train", "num_bytes": 103967243513.625, "num_examples": 40695787}], "download_size": 109587041106, "dataset_size": 103967243513.625}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-02T15:43:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "noisy-cifar10-reconstructions" More Information needed
[ "# Dataset Card for \"noisy-cifar10-reconstructions\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"noisy-cifar10-reconstructions\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"noisy-cifar10-reconstructions\"\n\nMore Information needed" ]
99f1b221e38f4e41a5c6fa6007e1a5d9b760ce6c
# Dataset Card for "Topic-Prediction-Context-With-Random-Prompts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Back-up/Topic-Prediction-Context-With-Random-Prompts
[ "region:us" ]
2023-11-02T14:27:58+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "topic", "struct": [{"name": "topic", "dtype": "string"}]}, {"name": "answers", "struct": [{"name": "answer_start", "sequence": "int64"}, {"name": "text", "sequence": "string"}]}, {"name": "instruction", "dtype": "string"}, {"name": "prompt_name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 256498, "num_examples": 101}], "download_size": 124883, "dataset_size": 256498}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-02T14:28:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Topic-Prediction-Context-With-Random-Prompts" More Information needed
[ "# Dataset Card for \"Topic-Prediction-Context-With-Random-Prompts\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Topic-Prediction-Context-With-Random-Prompts\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Topic-Prediction-Context-With-Random-Prompts\"\n\nMore Information needed" ]
a4aa15b101d12dd34c5712736d5ab9c690ca2366
# Dataset Card for "easy-german-explanations" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jmelsbach/easy-german-explanations
[ "region:us" ]
2023-11-02T14:46:34+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "title", "dtype": "string"}, {"name": "href", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "content", "dtype": "string"}, {"name": "parsed_content", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 76834511.02908278, "num_examples": 2860}, {"name": "test", "num_bytes": 19235492.970917225, "num_examples": 716}], "download_size": 22733394, "dataset_size": 96070004.0}}
2023-11-02T14:46:41+00:00
[]
[]
TAGS #region-us
# Dataset Card for "easy-german-explanations" More Information needed
[ "# Dataset Card for \"easy-german-explanations\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"easy-german-explanations\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"easy-german-explanations\"\n\nMore Information needed" ]
68ea58fdae44ab79b4fa2ede1685352cfde028c6
# Dataset Card for "Topic-Prediction-Context-With-Random-Prompts-in-the-end" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Back-up/Topic-Prediction-Context-With-Random-Prompts-in-the-end
[ "region:us" ]
2023-11-02T14:48:47+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "topic", "struct": [{"name": "topic", "dtype": "string"}]}, {"name": "answers", "struct": [{"name": "answer_start", "sequence": "int64"}, {"name": "text", "sequence": "string"}]}, {"name": "instruction", "dtype": "string"}, {"name": "prompt_name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 248398, "num_examples": 101}], "download_size": 125460, "dataset_size": 248398}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-02T14:48:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Topic-Prediction-Context-With-Random-Prompts-in-the-end" More Information needed
[ "# Dataset Card for \"Topic-Prediction-Context-With-Random-Prompts-in-the-end\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Topic-Prediction-Context-With-Random-Prompts-in-the-end\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Topic-Prediction-Context-With-Random-Prompts-in-the-end\"\n\nMore Information needed" ]
7ed6744a3c59fb5e2fd3cd84c65b824c3249caad
# Dataset Card for Evaluation run of 01-ai/Yi-34B

## Dataset Description

- **Homepage:** 
- **Repository:** https://huggingface.co/01-ai/Yi-34B
- **Paper:** 
- **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard
- **Point of Contact:** [email protected]

### Dataset Summary

Dataset automatically created during the evaluation run of model [01-ai/Yi-34B](https://huggingface.co/01-ai/Yi-34B) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).

The dataset is composed of 3 configurations, each one corresponding to one of the evaluated tasks.

The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results.

An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).

To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset("open-llm-leaderboard/details_01-ai__Yi-34B_public",
	"harness_winogrande_5",
	split="train")
```

## Latest results

These are the [latest results from run 2023-11-08T19:46:38.378007](https://huggingface.co/datasets/open-llm-leaderboard/details_01-ai__Yi-34B_public/blob/main/results_2023-11-08T19-46-38.378007.json) (note that there might be results for other tasks in the repo if successive evals didn't cover the same tasks; you can find each in the results and the "latest" split for each eval):

```python
{
    "all": {
        "em": 0.6081166107382551,
        "em_stderr": 0.004999326629880105,
        "f1": 0.6419882550335565,
        "f1_stderr": 0.004748239351156368,
        "acc": 0.6683760448499347,
        "acc_stderr": 0.012160441706531726
    },
    "harness|drop|3": {
        "em": 0.6081166107382551,
        "em_stderr": 0.004999326629880105,
        "f1": 0.6419882550335565,
        "f1_stderr": 0.004748239351156368
    },
    "harness|gsm8k|5": {
        "acc": 0.5064442759666414,
        "acc_stderr": 0.013771340765699767
    },
    "harness|winogrande|5": {
        "acc": 0.8303078137332282,
        "acc_stderr": 0.010549542647363686
    }
}
```

### Supported Tasks and Leaderboards

[More Information Needed]

### Languages

[More Information Needed]

## Dataset Structure

### Data Instances

[More Information Needed]

### Data Fields

[More Information Needed]

### Data Splits

[More Information Needed]

## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

[More Information Needed]

### Citation Information

[More Information Needed]

### Contributions

[More Information Needed]
open-llm-leaderboard/details_01-ai__Yi-34B
[ "region:us" ]
2023-11-02T14:49:36+00:00
{"pretty_name": "Evaluation run of 01-ai/Yi-34B", "dataset_summary": "Dataset automatically created during the evaluation run of model [01-ai/Yi-34B](https://huggingface.co/01-ai/Yi-34B) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_01-ai__Yi-34B_public\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-11-08T19:46:38.378007](https://huggingface.co/datasets/open-llm-leaderboard/details_01-ai__Yi-34B_public/blob/main/results_2023-11-08T19-46-38.378007.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.6081166107382551,\n \"em_stderr\": 0.004999326629880105,\n \"f1\": 0.6419882550335565,\n \"f1_stderr\": 0.004748239351156368,\n \"acc\": 0.6683760448499347,\n \"acc_stderr\": 0.012160441706531726\n },\n \"harness|drop|3\": {\n \"em\": 0.6081166107382551,\n \"em_stderr\": 0.004999326629880105,\n \"f1\": 0.6419882550335565,\n \"f1_stderr\": 0.004748239351156368\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.5064442759666414,\n \"acc_stderr\": 0.013771340765699767\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.8303078137332282,\n \"acc_stderr\": 0.010549542647363686\n }\n}\n```", "repo_url": "https://huggingface.co/01-ai/Yi-34B", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_drop_3", "data_files": [{"split": "2023_11_08T19_46_38.378007", "path": ["**/details_harness|drop|3_2023-11-08T19-46-38.378007.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-11-08T19-46-38.378007.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_11_08T19_46_38.378007", "path": ["**/details_harness|gsm8k|5_2023-11-08T19-46-38.378007.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-11-08T19-46-38.378007.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_11_08T19_46_38.378007", "path": ["**/details_harness|winogrande|5_2023-11-08T19-46-38.378007.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-11-08T19-46-38.378007.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_11_08T19_46_38.378007", "path": ["results_2023-11-08T19-46-38.378007.parquet"]}, {"split": "latest", "path": ["results_2023-11-08T19-46-38.378007.parquet"]}]}]}
2023-12-01T14:52:34+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of 01-ai/Yi-34B ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model 01-ai/Yi-34B on the Open LLM Leaderboard. The dataset is composed of 3 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-11-08T19:46:38.378007(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Evaluation run of 01-ai/Yi-34B", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model 01-ai/Yi-34B on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-11-08T19:46:38.378007(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of 01-ai/Yi-34B", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model 01-ai/Yi-34B on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-11-08T19:46:38.378007(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 17, 31, 166, 66, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of 01-ai/Yi-34B## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model 01-ai/Yi-34B on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-11-08T19:46:38.378007(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
d3df001904708a53a234cb792c1498ae60ae5c4d
# Dataset Card for "ace_attorney" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
paoloitaliani/ace_attorney
[ "region:us" ]
2023-11-02T15:11:54+00:00
{"dataset_info": [{"config_name": "all", "features": [{"name": "document", "dtype": "string"}, {"name": "qa_pair", "dtype": "string"}, {"name": "subset", "dtype": "string"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4066829, "num_examples": 3854}, {"name": "validation", "num_bytes": 514282, "num_examples": 481}, {"name": "test", "num_bytes": 509760, "num_examples": 483}], "download_size": 2508666, "dataset_size": 5090871}, {"config_name": "multilex", "features": [{"name": "id", "dtype": "string"}, {"name": "document", "dtype": "string"}, {"name": "qa_pair", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2510781, "num_examples": 2235}, {"name": "validation", "num_bytes": 313336, "num_examples": 280}, {"name": "test", "num_bytes": 314132, "num_examples": 279}], "download_size": 1553363, "dataset_size": 3138249}, {"config_name": "output_few_shots_task_desk", "features": [{"name": "document", "dtype": "string"}, {"name": "qa_pair", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 80571, "num_examples": 80}, {"name": "validation", "num_bytes": 8287, "num_examples": 10}, {"name": "test", "num_bytes": 9032, "num_examples": 10}], "download_size": 73428, "dataset_size": 97890}, {"config_name": "output_fewshots", "features": [{"name": "document", "dtype": "string"}, {"name": "qa_pair", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 78734, "num_examples": 80}, {"name": "validation", "num_bytes": 7509, "num_examples": 10}, {"name": "test", "num_bytes": 8889, "num_examples": 10}], "download_size": 71778, "dataset_size": 95132}, {"config_name": "output_zero_shot_llama_prompt", "features": [{"name": "document", "dtype": "string"}, {"name": "qa_pair", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 80072, "num_examples": 80}, {"name": "validation", "num_bytes": 7291, "num_examples": 10}, {"name": "test", "num_bytes": 9572, "num_examples": 10}], "download_size": 75797, "dataset_size": 96935}, {"config_name": "output_zero_shot_task_desk", "features": [{"name": "document", "dtype": "string"}, {"name": "qa_pair", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 83927, "num_examples": 80}, {"name": "validation", "num_bytes": 7766, "num_examples": 10}, {"name": "test", "num_bytes": 9107, "num_examples": 10}], "download_size": 76564, "dataset_size": 100800}, {"config_name": "policies", "features": [{"name": "document", "dtype": "string"}, {"name": "qa_pair", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1502842, "num_examples": 1619}, {"name": "validation", "num_bytes": 189755, "num_examples": 203}, {"name": "test", "num_bytes": 193509, "num_examples": 202}], "download_size": 972367, "dataset_size": 1886106}], "configs": [{"config_name": "all", "data_files": [{"split": "train", "path": "all/train-*"}, {"split": "validation", "path": "all/validation-*"}, {"split": "test", "path": "all/test-*"}]}, {"config_name": "multilex", "data_files": [{"split": "train", "path": "multilex/train-*"}, {"split": "validation", "path": "multilex/validation-*"}, {"split": "test", "path": "multilex/test-*"}]}, {"config_name": "output_few_shots_task_desk", "data_files": [{"split": "train", "path": "output_few_shots_task_desk/train-*"}, {"split": "validation", "path": "output_few_shots_task_desk/validation-*"}, {"split": "test", "path": "output_few_shots_task_desk/test-*"}]}, {"config_name": "output_fewshots", "data_files": [{"split": "train", "path": "output_fewshots/train-*"}, {"split": "validation", "path": 
"output_fewshots/validation-*"}, {"split": "test", "path": "output_fewshots/test-*"}]}, {"config_name": "output_zero_shot_llama_prompt", "data_files": [{"split": "train", "path": "output_zero_shot_llama_prompt/train-*"}, {"split": "validation", "path": "output_zero_shot_llama_prompt/validation-*"}, {"split": "test", "path": "output_zero_shot_llama_prompt/test-*"}]}, {"config_name": "output_zero_shot_task_desk", "data_files": [{"split": "train", "path": "output_zero_shot_task_desk/train-*"}, {"split": "validation", "path": "output_zero_shot_task_desk/validation-*"}, {"split": "test", "path": "output_zero_shot_task_desk/test-*"}]}, {"config_name": "policies", "data_files": [{"split": "train", "path": "policies/train-*"}, {"split": "validation", "path": "policies/validation-*"}, {"split": "test", "path": "policies/test-*"}]}]}
2023-11-22T14:21:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ace_attorney" More Information needed
[ "# Dataset Card for \"ace_attorney\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ace_attorney\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ace_attorney\"\n\nMore Information needed" ]
0a0fe9c8bdace3f33202bbef130ad18f2040e169
# Dataset Card for "csaw_dense4" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Phaedrus/csaw_dense4
[ "region:us" ]
2023-11-02T15:16:19+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label1", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 308923702.0, "num_examples": 264}], "download_size": 46021390, "dataset_size": 308923702.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-02T15:16:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for "csaw_dense4" More Information needed
[ "# Dataset Card for \"csaw_dense4\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"csaw_dense4\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"csaw_dense4\"\n\nMore Information needed" ]
8ba410064265e0217a2be5c8e3b39f43e3e6c59a
# Dataset Card for "eurlexsum_ita_cleaned_8192_232" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
gianma/eurlexsum_ita_cleaned_8192_232
[ "region:us" ]
2023-11-02T15:22:31+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "is_camera", "dtype": "bool"}, {"name": "reference", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "tokenized_len_total", "dtype": "int64"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 4119487, "num_examples": 228}, {"name": "validation", "num_bytes": 231666, "num_examples": 13}, {"name": "test", "num_bytes": 253451, "num_examples": 13}], "download_size": 0, "dataset_size": 4604604}}
2023-11-05T11:45:05+00:00
[]
[]
TAGS #region-us
# Dataset Card for "eurlexsum_ita_cleaned_8192_232" More Information needed
[ "# Dataset Card for \"eurlexsum_ita_cleaned_8192_232\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"eurlexsum_ita_cleaned_8192_232\"\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"eurlexsum_ita_cleaned_8192_232\"\n\nMore Information needed" ]
f26ab43c29825e03017f8a5d8819105d4a081339
# mini coco dataset files

# Required dependencies
```
OpenCV (cv2)
matplotlib
ipywidgets
```

# img_data.psv

Extract of the COCO dataset containing the following labels:
```["airplane", "backpack", "cell phone", "handbag", "suitcase", "knife", "laptop", "car"]``` (300 of each)

```
Structured as follows:

| Field           | Description                                                                                          |
| --------------- | ---------------------------------------------------------------------------------------------------- |
| file_name       | Name of image file (.png)                                                                            |
| height          | Image height prior to padding                                                                        |
| width           | Image width prior to padding                                                                         |
| annotations     | Array of [bounding box, label] pairs. Bbox arrays are of the form [x_min, y_min, width, height]      |

1.09k rows
```

# /data (folder)

This directory contains a selection of zero-padded COCO images that correspond to img_data.psv; image names are of the following format:

```
xxxxxx.png
```

# display_boundary.py

Allows images to be viewed with their bounding boxes; you don't need to pay attention to how it works.

```
- Intended to run in tandem with a Jupyter notebook.
- Takes img_name.png as input; inspect img_data.psv or /data for image names.
```

If you have any questions or issues, feel free to keep them to yourself.
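A hedged sketch of reading `img_data.psv` and drawing the boxes with OpenCV. The pipe separator and the literal-list encoding of the `annotations` column are assumptions, not confirmed above:

```python
# Sketch: load one row of img_data.psv and draw its bounding boxes.
# Assumptions: pipe-separated values, and `annotations` stored as a
# Python-literal list of [bbox, label] pairs with bbox = [x_min, y_min, w, h].
import ast
import cv2
import pandas as pd

df = pd.read_csv("img_data.psv", sep="|")
row = df.iloc[0]
img = cv2.imread(f"data/{row['file_name']}")

for bbox, label in ast.literal_eval(row["annotations"]):
    x, y, w, h = map(int, bbox)
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.putText(img, label, (x, max(y - 5, 0)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

cv2.imwrite("preview.png", img)
```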
iix/mini_coco_linux
[ "task_categories:text-classification", "task_categories:text-generation", "size_categories:0.001M<n<0.0011M", "language:en", "license:mit", "code", "region:us" ]
2023-11-02T15:26:24+00:00
{"language": ["en"], "license": "mit", "size_categories": ["0.001M<n<0.0011M"], "task_categories": ["text-classification", "text-generation"], "pretty_name": "*", "tags": ["code"]}
2023-11-02T16:59:42+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #task_categories-text-generation #size_categories-0.001M<n<0.0011M #language-English #license-mit #code #region-us
# mini coco dataset files # Required dependencies # img_data.psv Extract of the coco dataset containing the following labels: (300 of each) # /data (folder) This directory contains a selection of zero-padded COCO images that correspond to img_data.parquet, image names are of the following format: # display_boundary.py Allows images to be viewed with their boundary boxes, don't need to pay attention to how it works. If you have any questions or issues, feel free to keep them to yourself.
[ "# mini coco dataset files", "# Required dependencies", "# img_data.psv\n\nExtract of the coco dataset containing the following labels: (300 of each)", "# /data (folder)\n\nThis directory contains a selection of zero-padded COCO images that correspond to img_data.parquet, image names are of the following format:", "# display_boundary.py\n\nAllows images to be viewed with their boundary boxes, don't need to pay attention to how it works.\n\n\n\n\nIf you have any questions or issues, feel free to keep them to yourself." ]
[ "TAGS\n#task_categories-text-classification #task_categories-text-generation #size_categories-0.001M<n<0.0011M #language-English #license-mit #code #region-us \n", "# mini coco dataset files", "# Required dependencies", "# img_data.psv\n\nExtract of the coco dataset containing the following labels: (300 of each)", "# /data (folder)\n\nThis directory contains a selection of zero-padded COCO images that correspond to img_data.parquet, image names are of the following format:", "# display_boundary.py\n\nAllows images to be viewed with their boundary boxes, don't need to pay attention to how it works.\n\n\n\n\nIf you have any questions or issues, feel free to keep them to yourself." ]
[ 54, 6, 6, 27, 41, 51 ]
[ "passage: TAGS\n#task_categories-text-classification #task_categories-text-generation #size_categories-0.001M<n<0.0011M #language-English #license-mit #code #region-us \n# mini coco dataset files# Required dependencies# img_data.psv\n\nExtract of the coco dataset containing the following labels: (300 of each)# /data (folder)\n\nThis directory contains a selection of zero-padded COCO images that correspond to img_data.parquet, image names are of the following format:# display_boundary.py\n\nAllows images to be viewed with their boundary boxes, don't need to pay attention to how it works.\n\n\n\n\nIf you have any questions or issues, feel free to keep them to yourself." ]
2185c72180d888ce53290ed37b80f390a0557987
# Dataset Card for "Topic-Prediction-with-pair-qa" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Back-up/Topic-Prediction-with-pair-qa
[ "region:us" ]
2023-11-02T15:26:30+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "topic", "struct": [{"name": "topic", "dtype": "string"}]}, {"name": "answers", "struct": [{"name": "answer_start", "sequence": "int64"}, {"name": "text", "sequence": "string"}]}, {"name": "instruction", "dtype": "string"}, {"name": "prompt_name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 153313, "num_examples": 101}], "download_size": 82427, "dataset_size": 153313}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-02T15:31:26+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Topic-Prediction-with-pair-qa" More Information needed
[ "# Dataset Card for \"Topic-Prediction-with-pair-qa\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Topic-Prediction-with-pair-qa\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Topic-Prediction-with-pair-qa\"\n\nMore Information needed" ]
ce9a72e0eb443eb8342928f4c72a76250616f496
# Dataset Card for "Topic-Prediction-with-pair-qa-v1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Back-up/Topic-Prediction-with-pair-qa-v1
[ "region:us" ]
2023-11-02T15:32:07+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "topic", "struct": [{"name": "topic", "dtype": "string"}]}, {"name": "answers", "struct": [{"name": "answer_start", "sequence": "int64"}, {"name": "text", "sequence": "string"}]}, {"name": "instruction", "dtype": "string"}, {"name": "prompt_name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 153313, "num_examples": 101}], "download_size": 82427, "dataset_size": 153313}}
2023-11-02T15:32:12+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Topic-Prediction-with-pair-qa-v1" More Information needed
[ "# Dataset Card for \"Topic-Prediction-with-pair-qa-v1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Topic-Prediction-with-pair-qa-v1\"\n\nMore Information needed" ]
[ 6, 25 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Topic-Prediction-with-pair-qa-v1\"\n\nMore Information needed" ]
767572107b907a9c77491440b27885af67e4d2c4
# Dataset Card for "new_dataset_finetune" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Varun1808/new_dataset_finetune
[ "region:us" ]
2023-11-02T15:40:58+00:00
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "prompt", "dtype": "string"}, {"name": "completion", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 13167, "num_examples": 55}], "download_size": 4677, "dataset_size": 13167}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-02T15:41:01+00:00
[]
[]
TAGS #region-us
# Dataset Card for "new_dataset_finetune" More Information needed
[ "# Dataset Card for \"new_dataset_finetune\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"new_dataset_finetune\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"new_dataset_finetune\"\n\nMore Information needed" ]
e7cba2d862bb270a18415f71268040d6c20e99d7
# **VOC2012 Image and Annotation Visualization** Notebook

**GitHub**: https://github.com/ikaankeskin/MLXdatasets/tree/main/ObjectDetection/PASCAL

**HuggingFace**: https://huggingface.co/datasets/ikaankeskin/PASCAL_MLX

This repository contains a tool that facilitates the download, extraction, and visualization of the VOC2012 dataset, complete with bounding box annotations extracted from associated XML files.

## **Features**

- **Automated Dataset Download**: Fetches the VOC2012 dataset from Hugging Face's repository in ZIP format.
- **ZIP Extraction**: Conveniently unzips the downloaded dataset to provide access to images and their annotations.
- **Image Visualization**: Displays a select set of images from the dataset for preliminary visualization.
- **XML Annotation Processing**: Reads corresponding XML annotation files for chosen images.
- **Bounding Box Overlay**: Draws bounding boxes around annotated objects on the images, enhancing visualization.
- **Annotation Table Display**: Offers a structured view of extracted details from XML annotations in tabular format.

## **Prerequisites**

- **Python**: Version 3.x
- **Libraries**: As specified in **`requirements.txt`**, which includes:
  - requests
  - tqdm
  - pandas
  - matplotlib
  - opencv-python

## **Object Filters for Visualizations**

The tool comes equipped with a specific color mapping that governs the visual representation of certain objects when overlaying bounding box annotations on images. The current mapping is coded as:

```python
color_mapping = {'train': (0, 255, 0), 'person': (0, 0, 255)}
```

This implies:

- 'train' objects are rendered with **green** bounding boxes (RGB: **`(0, 255, 0)`**).
- 'person' objects are visualized with **blue** bounding boxes (RGB: **`(0, 0, 255)`**).

Objects not included in this mapping will not receive bounding boxes during visualization. For incorporating additional object types or altering existing color configurations, users can edit or extend the **`color_mapping`** dictionary. For instance, to visualize 'car' objects in red, an entry **`'car': (255, 0, 0)`** can be added.
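To illustrate the overlay step, the sketch below parses a standard VOC2012 annotation XML and applies `color_mapping`. The file paths are placeholders, and note that OpenCV interprets colour tuples as BGR rather than RGB, so on-screen colours may differ from the RGB values quoted above:

```python
# Sketch of the bounding-box overlay described above.
# Assumptions: standard VOC2012 directory layout; paths are placeholders.
import xml.etree.ElementTree as ET
import cv2

color_mapping = {'train': (0, 255, 0), 'person': (0, 0, 255)}

img = cv2.imread("VOC2012/JPEGImages/2007_000027.jpg")
root = ET.parse("VOC2012/Annotations/2007_000027.xml").getroot()

for obj in root.iter("object"):
    name = obj.findtext("name")
    if name not in color_mapping:     # objects outside the mapping are skipped
        continue
    box = obj.find("bndbox")
    xmin, ymin, xmax, ymax = (int(float(box.findtext(tag)))
                              for tag in ("xmin", "ymin", "xmax", "ymax"))
    cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color_mapping[name], 2)

cv2.imwrite("annotated.png", img)
```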
ikaankeskin/PASCAL_MLX
[ "region:us" ]
2023-11-02T15:41:22+00:00
{}
2023-11-02T16:19:35+00:00
[]
[]
TAGS #region-us
# VOC2012 Image and Annotation Visualization Notebook Github: URL HuggingFace: URL This repository contains a tool that facilitates the download, extraction, and visualization of the VOC2012 dataset, complete with bounding box annotations extracted from associated XML files. ## Features - Automated Dataset Download: Fetches the VOC2012 dataset from Hugging Face's repository in ZIP format. - ZIP Extraction: Conveniently unzips the downloaded dataset to provide access to images and their annotations. - Image Visualization: Displays a select set of images from the dataset for preliminary visualization. - XML Annotation Processing: Reads corresponding XML annotation files for chosen images. - Bounding Box Overlay: Draws bounding boxes around annotated objects on the images, enhancing visualization. - Annotation Table Display: Offers a structured view of extracted details from XML annotations in tabular format. python color_mapping = {'train': (0, 255, 0), 'person': (0, 0, 255)} ''' This implies: - 'train' objects are rendered with green bounding boxes (RGB: '(0, 255, 0)'). - 'person' objects are visualized with blue bounding boxes (RGB: '(0, 0, 255)'). Objects not included in this mapping will not receive bounding boxes during visualization. For incorporating additional object types or altering existing color configurations, users can edit or extend the 'color_mapping' dictionary. For instance, to visualize 'car' objects in red, an entry ''car': (255, 0, 0)' can be added.
[ "# VOC2012 Image and Annotation Visualization Notebook\n\nGithub: URL\n\nHuggingFace: URL\n\nThis repository contains a tool that facilitates the download, extraction, and visualization of the VOC2012 dataset, complete with bounding box annotations extracted from associated XML files.", "## Features\n\n- Automated Dataset Download: Fetches the VOC2012 dataset from Hugging Face's repository in ZIP format.\n- ZIP Extraction: Conveniently unzips the downloaded dataset to provide access to images and their annotations.\n- Image Visualization: Displays a select set of images from the dataset for preliminary visualization.\n- XML Annotation Processing: Reads corresponding XML annotation files for chosen images.\n- Bounding Box Overlay: Draws bounding boxes around annotated objects on the images, enhancing visualization.\n- Annotation Table Display: Offers a structured view of extracted details from XML annotations in tabular format.\n\n\npython\ncolor_mapping = {'train': (0, 255, 0), 'person': (0, 0, 255)}\n'''\n\nThis implies:\n\n- 'train' objects are rendered with green bounding boxes (RGB: '(0, 255, 0)').\n- 'person' objects are visualized with blue bounding boxes (RGB: '(0, 0, 255)').\n\nObjects not included in this mapping will not receive bounding boxes during visualization. For incorporating additional object types or altering existing color configurations, users can edit or extend the 'color_mapping' dictionary. For instance, to visualize 'car' objects in red, an entry ''car': (255, 0, 0)' can be added." ]
[ "TAGS\n#region-us \n", "# VOC2012 Image and Annotation Visualization Notebook\n\nGithub: URL\n\nHuggingFace: URL\n\nThis repository contains a tool that facilitates the download, extraction, and visualization of the VOC2012 dataset, complete with bounding box annotations extracted from associated XML files.", "## Features\n\n- Automated Dataset Download: Fetches the VOC2012 dataset from Hugging Face's repository in ZIP format.\n- ZIP Extraction: Conveniently unzips the downloaded dataset to provide access to images and their annotations.\n- Image Visualization: Displays a select set of images from the dataset for preliminary visualization.\n- XML Annotation Processing: Reads corresponding XML annotation files for chosen images.\n- Bounding Box Overlay: Draws bounding boxes around annotated objects on the images, enhancing visualization.\n- Annotation Table Display: Offers a structured view of extracted details from XML annotations in tabular format.\n\n\npython\ncolor_mapping = {'train': (0, 255, 0), 'person': (0, 0, 255)}\n'''\n\nThis implies:\n\n- 'train' objects are rendered with green bounding boxes (RGB: '(0, 255, 0)').\n- 'person' objects are visualized with blue bounding boxes (RGB: '(0, 0, 255)').\n\nObjects not included in this mapping will not receive bounding boxes during visualization. For incorporating additional object types or altering existing color configurations, users can edit or extend the 'color_mapping' dictionary. For instance, to visualize 'car' objects in red, an entry ''car': (255, 0, 0)' can be added." ]
[ 6, 63, 332 ]
[ "passage: TAGS\n#region-us \n# VOC2012 Image and Annotation Visualization Notebook\n\nGithub: URL\n\nHuggingFace: URL\n\nThis repository contains a tool that facilitates the download, extraction, and visualization of the VOC2012 dataset, complete with bounding box annotations extracted from associated XML files.## Features\n\n- Automated Dataset Download: Fetches the VOC2012 dataset from Hugging Face's repository in ZIP format.\n- ZIP Extraction: Conveniently unzips the downloaded dataset to provide access to images and their annotations.\n- Image Visualization: Displays a select set of images from the dataset for preliminary visualization.\n- XML Annotation Processing: Reads corresponding XML annotation files for chosen images.\n- Bounding Box Overlay: Draws bounding boxes around annotated objects on the images, enhancing visualization.\n- Annotation Table Display: Offers a structured view of extracted details from XML annotations in tabular format.\n\n\npython\ncolor_mapping = {'train': (0, 255, 0), 'person': (0, 0, 255)}\n'''\n\nThis implies:\n\n- 'train' objects are rendered with green bounding boxes (RGB: '(0, 255, 0)').\n- 'person' objects are visualized with blue bounding boxes (RGB: '(0, 0, 255)').\n\nObjects not included in this mapping will not receive bounding boxes during visualization. For incorporating additional object types or altering existing color configurations, users can edit or extend the 'color_mapping' dictionary. For instance, to visualize 'car' objects in red, an entry ''car': (255, 0, 0)' can be added." ]
e89d1288fe5dc085cc4e16bd95be118f703d5c6b
![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/6489e1e3eb763749c663f40c/ZKzKrpYNjiFHLZtH062GW.jpeg)

Reference: "A Question-Entailment Approach to Question Answering". Asma Ben Abacha and Dina Demner-Fushman. BMC Bioinformatics, 2019.
<br/>This is an update of Keivalya Pandya's dataset (keivalya/MedQuad-MedicalQnADataset).

<h1> Content </h1>

There are medical questions and corresponding responses in a prompt format for chat- or instruct-type models, so that LLMs can be fine-tuned on small hardware (1 or 2 GPUs with 14 GB).
<br/>Rows above 128 tokens have been deleted.
<br/>Rows have been truncated at a line break or a sentence end in order to preserve a correct meaning.

<h2> Script to download the dataset </h2>

```python
from datasets import load_dataset

dataset_name = "Laurent1/MedQuad-MedicalQnADataset_128tokens_max"
dataset = load_dataset(dataset_name, split="train")
```
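For illustration, a sketch of the 128-token filter described above. The tokenizer choice and the text column name are assumptions; the card does not specify which tokenizer was used to count tokens:

```python
# Hypothetical reconstruction of the length filter; not the author's exact script.
from datasets import load_dataset
from transformers import AutoTokenizer

dataset = load_dataset("keivalya/MedQuad-MedicalQnADataset", split="train")
tok = AutoTokenizer.from_pretrained("gpt2")   # placeholder tokenizer choice

def short_enough(example):
    # assumes the prompt/response text lives in a column named "text"
    return len(tok(example["text"]).input_ids) <= 128

filtered = dataset.filter(short_enough)
```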
Laurent1/MedQuad-MedicalQnADataset_128tokens_max
[ "task_categories:question-answering", "task_categories:text-generation", "size_categories:10K<n<100K", "language:en", "license:apache-2.0", "region:us" ]
2023-11-02T15:43:47+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["10K<n<100K"], "task_categories": ["question-answering", "text-generation"]}
2023-11-03T06:52:06+00:00
[]
[ "en" ]
TAGS #task_categories-question-answering #task_categories-text-generation #size_categories-10K<n<100K #language-English #license-apache-2.0 #region-us
!image/jpeg Reference : "A Question-Entailment Approach to Question Answering". Asma Ben Abacha and Dina Demner-Fushman. BMC Bioinformatics, 2019." <br/>This is an update of Keivalya Pandya's dataset (keivalya/MedQuad-MedicalQnADataset). <h1> Content </h1> There are medical questions and corresponding responses in a prompt format for chat or instruct model types <br/>In order to fine tuned LLM with small HW (1 or 2 GPU with 14 Go) <br/>Rows above 128 tokens have been deleted. <br/>Rows have been truncated to a line break or a sentence end in order to keep a correct meaning <h2> Script to download the dataset </h2> <br/>from datasets import load_dataset <br/>dataset_name = "Laurent1/MedQuad-MedicalQnADataset_128tokens_max" <br/>dataset = load_dataset(dataset_name, split="train")
[]
[ "TAGS\n#task_categories-question-answering #task_categories-text-generation #size_categories-10K<n<100K #language-English #license-apache-2.0 #region-us \n" ]
[ 53 ]
[ "passage: TAGS\n#task_categories-question-answering #task_categories-text-generation #size_categories-10K<n<100K #language-English #license-apache-2.0 #region-us \n" ]
7aa5f629b5c896c558075335005159e403ebdac6
# TaoGPT Dataset

<!-- Provide a quick summary of the dataset. -->

This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1).

## Dataset Details

### Dataset Description

<!-- Provide a longer summary of what this dataset is. -->



- **Curated by:** [Adithya S K](https://github.com/adithya-s-k)
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [English]
- **License:** [MIT]

### Dataset Sources [optional]

<!-- Provide the basic links for the dataset. -->

- **Repository:** [https://github.com/agencyxr/taogpt7B](https://github.com/agencyxr/taogpt7B)
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the dataset is intended to be used. -->

This dataset is used to fine-tune LLMs for answering questions about TaoScience.

### Direct Use

<!-- This section describes suitable use cases for the dataset. -->

[More Information Needed]

## Dataset Structure

<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->

List of Question and Answer Pairs

[More Information Needed]
agency888/TaoGPT-v1
[ "task_categories:question-answering", "task_categories:text2text-generation", "task_categories:table-question-answering", "size_categories:1K<n<10K", "language:en", "license:mit", "Science", "TaoScience", "doi:10.57967/hf/1310", "region:us" ]
2023-11-02T15:49:18+00:00
{"language": ["en"], "license": "mit", "size_categories": ["1K<n<10K"], "task_categories": ["question-answering", "text2text-generation", "table-question-answering"], "tags": ["Science", "TaoScience"], "dataset_info": {"features": [{"name": "answer", "dtype": "string"}, {"name": "text_mistral", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "text_finetuning", "dtype": "string"}, {"name": "question", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1412556, "num_examples": 1552}], "download_size": 476887, "dataset_size": 1412556}}
2023-11-03T14:24:42+00:00
[]
[ "en" ]
TAGS #task_categories-question-answering #task_categories-text2text-generation #task_categories-table-question-answering #size_categories-1K<n<10K #language-English #license-mit #Science #TaoScience #doi-10.57967/hf/1310 #region-us
# TaoGPT Dataset

This dataset card aims to be a base template for new datasets. It has been generated using this raw template.

## Dataset Details

### Dataset Description

- Curated by: Adithya S K
- Funded by [optional]: 
- Shared by [optional]: 
- Language(s) (NLP): [English]
- License: [MIT]

### Dataset Sources [optional]

- Repository: URL
- Demo [optional]:

## Uses

This dataset is used to fine-tune LLMs for answering questions about TaoScience.

### Direct Use

## Dataset Structure

List of Question and Answer Pairs
[ "# TaoGPT Dataset\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: Adithya S K\n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): [English]\n- License: [MIT]", "### Dataset Sources [optional]\n\n\n\n- Repository: URL\n- Demo [optional]:", "## Uses\n\n\nThis dataset is used to fine-tune LLMs for answering questions about TaoScience.", "### Direct Use", "## Dataset Structure\n\n\n\nList of Question and Answer Pairs" ]
[ "TAGS\n#task_categories-question-answering #task_categories-text2text-generation #task_categories-table-question-answering #size_categories-1K<n<10K #language-English #license-mit #Science #TaoScience #doi-10.57967/hf/1310 #region-us \n", "# TaoGPT Dataset\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: Adithya S K\n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): [English]\n- License: [MIT]", "### Dataset Sources [optional]\n\n\n\n- Repository: URL\n- Demo [optional]:", "## Uses\n\n\nThis dataset is used to fine-tune LLMs for answering questions about TaoScience.", "### Direct Use", "## Dataset Structure\n\n\n\nList of Question and Answer Pairs" ]
[ 84, 33, 4, 51, 23, 24, 4, 13 ]
[ "passage: TAGS\n#task_categories-question-answering #task_categories-text2text-generation #task_categories-table-question-answering #size_categories-1K<n<10K #language-English #license-mit #Science #TaoScience #doi-10.57967/hf/1310 #region-us \n# TaoGPT Dataset\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: Adithya S K\n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): [English]\n- License: [MIT]### Dataset Sources [optional]\n\n\n\n- Repository: URL\n- Demo [optional]:## Uses\n\n\nThis dataset is used to fine-tune LLMs for answering questions about TaoScience.### Direct Use## Dataset Structure\n\n\n\nList of Question and Answer Pairs" ]
c72e1dc64cd47a74085aaf56861485582c7c1bde
# ChineseWebText: Large-Scale High-quality Chinese Web Text Extracted with Effective Evaluation Model

This directory contains the ChineseWebText dataset and the EvalWeb tool-chain for processing CommonCrawl data. Our EvalWeb tool is publicly available on GitHub: https://github.com/CASIA-LM/ChineseWebText.

# ChineseWebText

- ### Dataset Overview

We release the latest and largest Chinese dataset **ChineseWebText**, which consists of **1.42 TB** of data in which each text is assigned a quality score, allowing LLM researchers to select data according to their own quality threshold. We also release a much **cleaner subset** of **600 GB** of Chinese texts with quality scores exceeding **90%**.

<div align=center><img src="./pictures/Overview_of_output_datasets.png" style="zoom:67%;" /></div>

- ### Data Example

  ```json
  {
    "title": "潍坊银行2021年上半年净利润同比增长29.57% 不良率降至1.10%_财经_中国网",
    "score": 0.95,
    "text": "潍坊银行2021年上半年净利润同比增长29.57% 不良率降至1.10%\n中国网财经8月24日讯 潍坊银行昨日披露2021年二季度信息报告显示,截至2021 年6月末,潍坊银行资产总额1920.44亿元,较上年末增长9.34%;负债总额1789.16亿元,较上年末增长10.54%。2021年上半年,潍坊银行实现净利润 6.09亿元,同比增长29.57%。\n资产质量方面,截至2021年6月末,潍坊银行不良贷款率1.10%,较上年末下降0.13个百分点。\n资本金方面,截至 2021年6月末,潍坊银行资本充足率、核心一级资本充足率、一级资本充足率分别为11.66%、7.89%、10.13%,分别较上年末下降1.89、0.89、1.15 个百分点。",
    "url": "http://finance.china.com.cn/news/special/2021bnb/20210824/5638343.shtml",
    "source_domain": "finance.china.com.cn"
  }
  ```

  - "title": 【string】The title of the data text.
  - "score": 【float】Quality score generated by the quality evaluation model.
  - "text": 【string】Text content of the data sample.
  - "url": 【string】External URL, pointing to the original web address of the text.
  - "source_domain": 【string】The domain name of the source website.

# Citation

Please cite the paper if you use the data in this repo.

```bibtex
@misc{chen2023chinesewebtext,
      title={ChineseWebText: Large-scale High-quality Chinese Web Text Extracted with Effective Evaluation Model},
      author={Jianghao Chen and Pu Jian and Tengxiao Xi and Dongyi Yi and Qianlong Du and Chenglin Ding and Guibo Zhu and Chengqing Zong and Jinqiao Wang and Jiajun Zhang},
      year={2023},
      eprint={2311.01149},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```
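Because every record carries a "score" field, threshold-based selection is a one-pass filter. Below is a minimal, hedged sketch: the shard file name is hypothetical, and it assumes one JSON record per line with the fields shown in the example above.

```python
# Minimal sketch: keep only texts whose quality score clears a chosen threshold.
import json

threshold = 0.9
kept = []
with open("chinesewebtext_shard.jsonl", encoding="utf-8") as f:  # hypothetical file
    for line in f:
        record = json.loads(line)
        if record["score"] >= threshold:
            kept.append(record["text"])

print(f"kept {len(kept)} documents with score >= {threshold}")
```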
CASIA-LM/ChineseWebText
[ "arxiv:2311.01149", "region:us" ]
2023-11-02T15:49:54+00:00
{}
2023-11-13T01:59:09+00:00
[ "2311.01149" ]
[]
TAGS #arxiv-2311.01149 #region-us
# ChineseWebText: Large-Scale High-quality Chinese Web Text Extracted with Effective Evaluation Model This directory contains the ChineseWebText dataset, and the EvalWeb tool-chain to process CommonCrawl Data. Our EvalWeb tool is publicly available on github URL # ChineseWebText - ### Dataset Overview We release the latest and largest Chinese dataset ChineseWebText, which consists of 1.42 TB data and each text is assigned a quality score, facilitating LLM researchers to select data according to a new quality threshold. We also release a much Cleaner subset of 600 GB Chinese texts with quality exceeding 90% . <div align=center><img src="./pictures/Overview_of_output_datasets.png" style="zoom:67%;" /></div> - ### Data Example - "title": 【string】The title of the data text. - "score": 【float】Quality score generated by the quality evaluation model. - "text": 【string】Text content of data sample. - "url": 【string】External URL, points to the original web address of the text. - "source_domain": 【string】The domain name of the source website. Please cite the paper if you use the data in this repo.
[ "# ChineseWebText: Large-Scale High-quality Chinese Web Text Extracted with Effective Evaluation Model\n\nThis directory contains the ChineseWebText dataset, and the EvalWeb tool-chain to process CommonCrawl Data. Our EvalWeb tool is publicly available on github URL", "# ChineseWebText \n\n- ### Dataset Overview\n\nWe release the latest and largest Chinese dataset ChineseWebText, which consists of 1.42 TB data and each text is assigned a quality score, facilitating LLM researchers to select data according to a new quality threshold. We also release a much Cleaner subset of 600 GB Chinese texts with quality exceeding 90% .\n\n<div align=center><img src=\"./pictures/Overview_of_output_datasets.png\" style=\"zoom:67%;\" /></div>\n\n- ### Data Example\n\n \n\n - \"title\": 【string】The title of the data text.\n - \"score\": 【float】Quality score generated by the quality evaluation model.\n - \"text\": 【string】Text content of data sample.\n - \"url\": 【string】External URL, points to the original web address of the text.\n - \"source_domain\": 【string】The domain name of the source website.\n\nPlease cite the paper if you use the data in this repo." ]
[ "TAGS\n#arxiv-2311.01149 #region-us \n", "# ChineseWebText: Large-Scale High-quality Chinese Web Text Extracted with Effective Evaluation Model\n\nThis directory contains the ChineseWebText dataset, and the EvalWeb tool-chain to process CommonCrawl Data. Our EvalWeb tool is publicly available on github URL", "# ChineseWebText \n\n- ### Dataset Overview\n\nWe release the latest and largest Chinese dataset ChineseWebText, which consists of 1.42 TB data and each text is assigned a quality score, facilitating LLM researchers to select data according to a new quality threshold. We also release a much Cleaner subset of 600 GB Chinese texts with quality exceeding 90% .\n\n<div align=center><img src=\"./pictures/Overview_of_output_datasets.png\" style=\"zoom:67%;\" /></div>\n\n- ### Data Example\n\n \n\n - \"title\": 【string】The title of the data text.\n - \"score\": 【float】Quality score generated by the quality evaluation model.\n - \"text\": 【string】Text content of data sample.\n - \"url\": 【string】External URL, points to the original web address of the text.\n - \"source_domain\": 【string】The domain name of the source website.\n\nPlease cite the paper if you use the data in this repo." ]
[ 15, 63, 237 ]
[ "passage: TAGS\n#arxiv-2311.01149 #region-us \n# ChineseWebText: Large-Scale High-quality Chinese Web Text Extracted with Effective Evaluation Model\n\nThis directory contains the ChineseWebText dataset, and the EvalWeb tool-chain to process CommonCrawl Data. Our EvalWeb tool is publicly available on github URL# ChineseWebText \n\n- ### Dataset Overview\n\nWe release the latest and largest Chinese dataset ChineseWebText, which consists of 1.42 TB data and each text is assigned a quality score, facilitating LLM researchers to select data according to a new quality threshold. We also release a much Cleaner subset of 600 GB Chinese texts with quality exceeding 90% .\n\n<div align=center><img src=\"./pictures/Overview_of_output_datasets.png\" style=\"zoom:67%;\" /></div>\n\n- ### Data Example\n\n \n\n - \"title\": 【string】The title of the data text.\n - \"score\": 【float】Quality score generated by the quality evaluation model.\n - \"text\": 【string】Text content of data sample.\n - \"url\": 【string】External URL, points to the original web address of the text.\n - \"source_domain\": 【string】The domain name of the source website.\n\nPlease cite the paper if you use the data in this repo." ]
3b18a1f0206ade5dab87f6162ccffe7b53b5c31e
# MountainCar-v0 - Imitation Learning Datasets

This is a dataset created by the [Imitation Learning Datasets](https://github.com/NathanGavenski/IL-Datasets) project. 
It was created using Stable Baselines weights from a DQN policy hosted on [HuggingFace](https://huggingface.co/sb3/dqn-MountainCar-v0).

## Description

The dataset consists of 1,000 episodes with an average episodic reward of `-98.817`.
Each entry consists of:

```
obs (list): observation with length 2.
action (int): action (0 or 1).
reward (float): reward for that timestep.
episode_returns (bool): whether that state was the initial timestep of an episode.
```

## Usage

Feel free to download and use the `teacher.jsonl` dataset as you please.
If you are interested in using our PyTorch Dataset implementation, feel free to check the [IL Datasets](https://github.com/NathanGavenski/IL-Datasets/blob/main/src/imitation_datasets/dataset/dataset.py) project.
There, we implement a base Dataset that downloads this dataset and all other datasets directly from HuggingFace.
The Baseline Dataset also allows for more control over train and test splits and how many episodes you want to use (in cases where the 1k episodes are not necessary).

## Citation

Coming soon.
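To illustrate the record layout described above, here is a minimal, hedged sketch that regroups the flat stream of timesteps back into episodes using the `episode_returns` flag. It assumes `teacher.jsonl` has been downloaded locally and stores one JSON object per line with the fields listed in the card.

```python
# Minimal sketch: rebuild episodes from the flat timestep stream.
import json

episodes = []
with open("teacher.jsonl", encoding="utf-8") as f:
    for line in f:
        step = json.loads(line)
        if step["episode_returns"]:  # assumption: True marks the first step of an episode
            episodes.append([])
        episodes[-1].append((step["obs"], step["action"], step["reward"]))

returns = [sum(r for _, _, r in ep) for ep in episodes]
print(f"{len(episodes)} episodes, average return {sum(returns) / len(returns):.3f}")
```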
NathanGavenski/MountainCar-v0
[ "size_categories:10M<n<100M", "license:mit", "Imitation Learning", "Expert Trajectory", "region:us" ]
2023-11-02T16:00:28+00:00
{"license": "mit", "size_categories": ["10M<n<100M"], "pretty_name": "MountainCar-v0 Expert Dataset", "tags": ["Imitation Learning", "Expert Trajectory"]}
2023-11-02T16:03:38+00:00
[]
[]
TAGS #size_categories-10M<n<100M #license-mit #Imitation Learning #Expert Trajectory #region-us
# MountainCar-v0 - Imitation Learning Datasets This is a dataset created by Imitation Learning Datasets project. It was created by using Stable Baselines weights from a DQN policy from HuggingFace. ## Description The dataset consists of 1,000 episodes with an average episodic reward of '-98.817'. Each entry consists of: ## Usage Feel free to download and use the 'URL' dataset as you please. If you are interested in using our PyTorch Dataset implementation, feel free to check the IL Datasets project. There, we implement a base Dataset that downloads this dataset and all other datasets directly from HuggingFace. The Baseline Dataset also allows for more control over train and test splits and how many episodes you want to use (in cases where the 1k episodes are not necessary). Coming soon.
[ "# MountainCar-v0 - Imitation Learning Datasets\n\nThis is a dataset created by Imitation Learning Datasets project. \nIt was created by using Stable Baselines weights from a DQN policy from HuggingFace.", "## Description\n\nThe dataset consists of 1,000 episodes with an average episodic reward of '-98.817'.\nEach entry consists of:", "## Usage\n\nFeel free to download and use the 'URL' dataset as you please.\nIf you are interested in using our PyTorch Dataset implementation, feel free to check the IL Datasets project.\nThere, we implement a base Dataset that downloads this dataset and all other datasets directly from HuggingFace.\nThe Baseline Dataset also allows for more control over train and test splits and how many episodes you want to use (in cases where the 1k episodes are not necessary).\n\nComing soon." ]
[ "TAGS\n#size_categories-10M<n<100M #license-mit #Imitation Learning #Expert Trajectory #region-us \n", "# MountainCar-v0 - Imitation Learning Datasets\n\nThis is a dataset created by Imitation Learning Datasets project. \nIt was created by using Stable Baselines weights from a DQN policy from HuggingFace.", "## Description\n\nThe dataset consists of 1,000 episodes with an average episodic reward of '-98.817'.\nEach entry consists of:", "## Usage\n\nFeel free to download and use the 'URL' dataset as you please.\nIf you are interested in using our PyTorch Dataset implementation, feel free to check the IL Datasets project.\nThere, we implement a base Dataset that downloads this dataset and all other datasets directly from HuggingFace.\nThe Baseline Dataset also allows for more control over train and test splits and how many episodes you want to use (in cases where the 1k episodes are not necessary).\n\nComing soon." ]
[ 33, 51, 30, 113 ]
[ "passage: TAGS\n#size_categories-10M<n<100M #license-mit #Imitation Learning #Expert Trajectory #region-us \n# MountainCar-v0 - Imitation Learning Datasets\n\nThis is a dataset created by Imitation Learning Datasets project. \nIt was created by using Stable Baselines weights from a DQN policy from HuggingFace.## Description\n\nThe dataset consists of 1,000 episodes with an average episodic reward of '-98.817'.\nEach entry consists of:## Usage\n\nFeel free to download and use the 'URL' dataset as you please.\nIf you are interested in using our PyTorch Dataset implementation, feel free to check the IL Datasets project.\nThere, we implement a base Dataset that downloads this dataset and all other datasets directly from HuggingFace.\nThe Baseline Dataset also allows for more control over train and test splits and how many episodes you want to use (in cases where the 1k episodes are not necessary).\n\nComing soon." ]
8af1c01b2f0a68cfc2f7e5ec05594d383f2cf714
# Dataset Card for "ukabs_id" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
CJWeiss/ukabs_id
[ "region:us" ]
2023-11-02T16:36:30+00:00
{"dataset_info": {"features": [{"name": "judgement", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 53147657, "num_examples": 594}, {"name": "test", "num_bytes": 10152794, "num_examples": 120}, {"name": "valid", "num_bytes": 8112656, "num_examples": 79}], "download_size": 33052417, "dataset_size": 71413107}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}, {"split": "valid", "path": "data/valid-*"}]}]}
2023-11-02T16:36:40+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ukabs_id" More Information needed
[ "# Dataset Card for \"ukabs_id\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ukabs_id\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ukabs_id\"\n\nMore Information needed" ]
45b79651708aa1f5b093729498b155cb6dda1e36
# Dataset Card for "gpt4_pipe_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Xapien/gpt4_pipe_test
[ "region:us" ]
2023-11-02T16:46:54+00:00
{"dataset_info": {"features": [{"name": "input_data", "dtype": "string"}, {"name": "output_data", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2067, "num_examples": 2}], "download_size": 8861, "dataset_size": 2067}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-02T16:46:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "gpt4_pipe_test" More Information needed
[ "# Dataset Card for \"gpt4_pipe_test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"gpt4_pipe_test\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"gpt4_pipe_test\"\n\nMore Information needed" ]
88244415a8362b5b1b46dd446332109227a8df29
# Dataset Card for "hl-codellama-chat-response-v2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
magnifi/hl-codellama-chat-response-v2
[ "region:us" ]
2023-11-02T16:47:40+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "Query", "dtype": "string"}, {"name": "Result", "dtype": "string"}, {"name": "chat_response", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1321860.461185117, "num_examples": 1523}, {"name": "test", "num_bytes": 567627.5388148829, "num_examples": 654}], "download_size": 109799, "dataset_size": 1889488.0}}
2023-11-02T16:47:51+00:00
[]
[]
TAGS #region-us
# Dataset Card for "hl-codellama-chat-response-v2" More Information needed
[ "# Dataset Card for \"hl-codellama-chat-response-v2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"hl-codellama-chat-response-v2\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"hl-codellama-chat-response-v2\"\n\nMore Information needed" ]
07f69284ae99bb60214c0d34fec3b46ba52115fa
# Dataset Card for "WikiDocPatientInformation_Dutch_translated_with_MariaNMT"

Translation of the **English** version of the Hugging Face dataset [WikiDoc patient information](https://huggingface.co/datasets/medalpaca/medical_meadow_wikidoc_patient_information) (based on [WikiDoc](https://www.wikidoc.org/index.php/Main_Page), a medical wiki) into **Dutch**, using a [Marian NMT model](https://marian-nmt.github.io/) trained by [Helsinki NLP](https://huggingface.co/Helsinki-NLP/opus-mt-en-nl). 
Note, for reference: Marian NMT is based on [BART](https://huggingface.co/docs/transformers/model_doc/bart), described [here](https://arxiv.org/abs/1910.13461).

# Attribution

If you use this dataset, please use the following to credit the creators of the OPUS-MT models:

```
@InProceedings{TiedemannThottingal:EAMT2020,
  author = {J{\"o}rg Tiedemann and Santhosh Thottingal},
  title = {{OPUS-MT} — {B}uilding open translation services for the {W}orld},
  booktitle = {Proceedings of the 22nd Annual Conference of the European Association for Machine Translation (EAMT)},
  year = {2020},
  address = {Lisbon, Portugal}
}
```

and

```
@misc {van_es_2024,
	author       = { {Bram van Es} },
	title        = { WikiDocPatientInformation_Dutch_translated_with_MariaNMT (Revision 4490701) },
	year         = 2024,
	url          = { https://huggingface.co/datasets/UMCU/WikiDocPatientInformation_Dutch_translated_with_MariaNMT },
	doi          = { 10.57967/hf/1669 },
	publisher    = { Hugging Face }
}
```

# License

For both the Marian NMT model and the original [Helsinki NLP](https://twitter.com/HelsinkiNLP) [Opus MT model](https://huggingface.co/Helsinki-NLP) 
we did **not** find a license. We also did not find a license for the MedQA corpus. For these reasons we use a permissive [CC BY](https://wellcome.org/grant-funding/guidance/open-access-guidance/creative-commons-attribution-licence-cc) license. If this was in error please let us know and we will add the appropriate licensing promptly.
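For reference, the kind of translation step described above can be reproduced with the same Helsinki-NLP model family. This is a hedged illustration rather than the exact pipeline used to build the dataset; the example sentence is made up.

```python
# Minimal sketch: English-to-Dutch translation with Helsinki-NLP/opus-mt-en-nl.
from transformers import MarianMTModel, MarianTokenizer

model_name = "Helsinki-NLP/opus-mt-en-nl"
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)

batch = tokenizer(["The patient should rest and drink plenty of fluids."],
                  return_tensors="pt", padding=True)
translated = model.generate(**batch)
print(tokenizer.decode(translated[0], skip_special_tokens=True))
```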
UMCU/WikiDocPatientInformation_Dutch_translated_with_MariaNMT
[ "task_categories:sentence-similarity", "task_categories:question-answering", "size_categories:1K<n<10K", "language:nl", "license:gpl-3.0", "medical", "arxiv:1910.13461", "doi:10.57967/hf/1669", "region:us" ]
2023-11-02T17:06:48+00:00
{"language": ["nl"], "license": "gpl-3.0", "size_categories": ["1K<n<10K"], "task_categories": ["sentence-similarity", "question-answering"], "pretty_name": "Dutch translation of WikiDoc", "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2807464, "num_examples": 5760}], "download_size": 1300914, "dataset_size": 2807464}, "tags": ["medical"]}
2024-01-22T09:48:00+00:00
[ "1910.13461" ]
[ "nl" ]
TAGS #task_categories-sentence-similarity #task_categories-question-answering #size_categories-1K<n<10K #language-Dutch #license-gpl-3.0 #medical #arxiv-1910.13461 #doi-10.57967/hf/1669 #region-us
# Dataset Card for "WikiDocPatientInformation_Dutch_translated_with_MariaNMT"

Translation of the English version of the Hugging Face dataset WikiDoc patient information (based
on WikiDoc, a medical wiki)
into Dutch, using a Marian NMT model trained by Helsinki NLP.
Note, for reference: Marian NMT is based on BART, described here.

# Attribution

If you use this dataset, please use the following to credit the creators of the OPUS-MT models:


and

# License

For both the Marian NMT model and the original Helsinki NLP Opus MT model 
we did not find a license. We also did not find a license for the MedQA corpus. For these reasons we use a permissive CC BY 
license. If this was in error please let us know and we will add the appropriate licensing promptly.
[ "# Dataset Card for \"WikiDocPatientInformation_Dutch_translated_with_MariaNMT\"\n\nTranslation of the English version of the Hugging Face dataset WikiDoc patient information (based\non WikiDoc, a medical wiki)\ninto Dutch, using a Marian NMT model trained by Helsinki NLP.\nNote, for reference: Marian NMT is based on BART, described here.", "# Attribution\n\nIf you use this dataset, please use the following to credit the creators of the OPUS-MT models:\n\n\nand", "# License\n\nFor both the Marian NMT model and the original Helsinki NLP Opus MT model \nwe did not find a license. We also did not find a license for the MedQA corpus. For these reasons we use a permissive CC BY \nlicense. If this was in error please let us know and we will add the appropriate licensing promptly." ]
[ "TAGS\n#task_categories-sentence-similarity #task_categories-question-answering #size_categories-1K<n<10K #language-Dutch #license-gpl-3.0 #medical #arxiv-1910.13461 #doi-10.57967/hf/1669 #region-us \n", "# Dataset Card for \"WikiDocPatientInformation_Dutch_translated_with_MariaNMT\"\n\nTranslation of the English version of the Hugging Face dataset WikiDoc patient information (based\non WikiDoc, a medical wiki)\ninto Dutch, using a Marian NMT model trained by Helsinki NLP.\nNote, for reference: Marian NMT is based on BART, described here.", "# Attribution\n\nIf you use this dataset, please use the following to credit the creators of the OPUS-MT models:\n\n\nand", "# License\n\nFor both the Marian NMT model and the original Helsinki NLP Opus MT model \nwe did not find a license. We also did not find a license for the MedQA corpus. For these reasons we use a permissive CC BY \nlicense. If this was in error please let us know and we will add the appropriate licensing promptly." ]
[ 81, 89, 26, 72 ]
[ "passage: TAGS\n#task_categories-sentence-similarity #task_categories-question-answering #size_categories-1K<n<10K #language-Dutch #license-gpl-3.0 #medical #arxiv-1910.13461 #doi-10.57967/hf/1669 #region-us \n# Dataset Card for \"WikiDocPatientInformation_Dutch_translated_with_MariaNMT\"\n\nTranslation of the English version of the Hugging Face dataset WikiDoc patient information (based\non WikiDoc, a medical wiki)\ninto Dutch, using a Marian NMT model trained by Helsinki NLP.\nNote, for reference: Marian NMT is based on BART, described here.# Attribution\n\nIf you use this dataset, please use the following to credit the creators of the OPUS-MT models:\n\n\nand# License\n\nFor both the Marian NMT model and the original Helsinki NLP Opus MT model \nwe did not find a license. We also did not find a license for the MedQA corpus. For these reasons we use a permissive CC BY \nlicense. If this was in error please let us know and we will add the appropriate licensing promptly." ]
6cdae117e919a4f5cb47ed899f51ed3d94aa5268
# Dataset Card for Dataset Name

<!-- Provide a quick summary of the dataset. -->

A dataset containing song titles, artist names, links to the songs, and lyrics.

## Dataset Details

Dataset retrieved from https://www.kaggle.com/datasets/notshrirang/spotify-million-song-dataset

### Dataset Description

<!-- Provide a longer summary of what this dataset is. -->

This is the Spotify Million Song Dataset. It contains song names, artist names, links to the songs, and lyrics. This dataset can be used for recommending, classifying, or clustering songs.

- **Curated by:** SHRIRANG MAHAJAN
- **Language(s) (NLP):** ENGLISH
- **License:** CC0 PUBLIC DOMAIN

### Dataset Sources [optional]

<!-- Provide the basic links for the dataset. -->

- **Repository:** https://www.kaggle.com/datasets/notshrirang/spotify-million-song-dataset

## Uses

<!-- Address questions around how the dataset is intended to be used. -->

### Direct Use

<!-- This section describes suitable use cases for the dataset. -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->

[More Information Needed]

## Dataset Structure

<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->

dataset_info:
  features:
    - name: {artist}
      dtype: {string}
    - name: {song}
      dtype: {string}
    - name: {link}
      dtype: {string}
    - name: {text} # Song Lyrics
      dtype: {string}

## Dataset Creation

### Curation Rationale

<!-- Motivation for the creation of this dataset. -->

[More Information Needed]

### Source Data

<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->

#### Data Collection and Processing

<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->

[More Information Needed]

#### Who are the source data producers?

<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->

[More Information Needed]

#### Annotation process

<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->

[More Information Needed]

#### Who are the annotators?

<!-- This section describes the people or systems who created the annotations. -->

[More Information Needed]

#### Personal and Sensitive Information

<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. 
--> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Dataset Card Contact [More Information Needed]
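As a quick illustration of the "recommending songs" use case mentioned in this card, here is a minimal, hedged sketch: a TF-IDF nearest-neighbour lookup over the lyrics. The local CSV file name is hypothetical; the `artist`, `song`, and `text` column names follow the structure listed above.

```python
# Minimal sketch: find the songs with the most similar lyrics to a query song.
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

df = pd.read_csv("spotify_million_song_dataset.csv")  # hypothetical local file
tfidf = TfidfVectorizer(stop_words="english", max_features=20000)
X = tfidf.fit_transform(df["text"])  # "text" holds the lyrics

sims = cosine_similarity(X[0], X).ravel()  # similarity of song 0 to every song
top = sims.argsort()[::-1][1:6]            # five most similar, excluding itself
print(df.iloc[top][["artist", "song"]])
```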
sebastiandizon/spotify-million-song
[ "region:us" ]
2023-11-02T17:19:49+00:00
{}
2023-11-02T17:41:50+00:00
[]
[]
TAGS #region-us
# Dataset Card for Dataset Name A dataset containing songs, artists names, link to song and lyrics ## Dataset Details Dataset retrieved from URL ### Dataset Description This is Spotify Million Song Dataset. This dataset contains song names, artists names, link to the song and lyrics. This dataset can be used for recommending songs, classifying or clustering songs. - Curated by: SHRIRANG MAHAJAN - Language(s) (NLP): ENGLISH - License: CC0 PUBLIC DOMAIN ### Dataset Sources [optional] - Repository: URL ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure dataset_info: features: - name: {artist} dtype: {string} - name: {song} dtype: {string} - name: {link} dtype: {string} - name: {text} # Song Lyrics dtype: {string} ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Dataset Card Contact
[ "# Dataset Card for Dataset Name\n\n\n\nA dataset containing songs, artists names, link to song and lyrics", "## Dataset Details\n\nDataset retrieved from URL", "### Dataset Description\n\n\n\nThis is Spotify Million Song Dataset. This dataset contains song names, artists names, link to the song and lyrics. This dataset can be used for recommending songs, classifying or clustering songs.\n\n- Curated by: SHRIRANG MAHAJAN\n- Language(s) (NLP): ENGLISH\n- License: CC0 PUBLIC DOMAIN", "### Dataset Sources [optional]\n\n\n\n- Repository: URL", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure\n\n\ndataset_info:\n features:\n - name: {artist}\n dtype: {string}\n - name: {song}\n dtype: {string}\n - name: {link}\n dtype: {string} \n - name: {text} # Song Lyrics\n dtype: {string}", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Dataset Name\n\n\n\nA dataset containing songs, artists names, link to song and lyrics", "## Dataset Details\n\nDataset retrieved from URL", "### Dataset Description\n\n\n\nThis is Spotify Million Song Dataset. This dataset contains song names, artists names, link to the song and lyrics. This dataset can be used for recommending songs, classifying or clustering songs.\n\n- Curated by: SHRIRANG MAHAJAN\n- Language(s) (NLP): ENGLISH\n- License: CC0 PUBLIC DOMAIN", "### Dataset Sources [optional]\n\n\n\n- Repository: URL", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure\n\n\ndataset_info:\n features:\n - name: {artist}\n dtype: {string}\n - name: {song}\n dtype: {string}\n - name: {link}\n dtype: {string} \n - name: {text} # Song Lyrics\n dtype: {string}", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.", "## Dataset Card Contact" ]
[ 6, 24, 11, 81, 16, 3, 4, 9, 65, 5, 7, 4, 7, 10, 5, 9, 8, 10, 35, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Dataset Name\n\n\n\nA dataset containing songs, artists names, link to song and lyrics## Dataset Details\n\nDataset retrieved from URL### Dataset Description\n\n\n\nThis is Spotify Million Song Dataset. This dataset contains song names, artists names, link to the song and lyrics. This dataset can be used for recommending songs, classifying or clustering songs.\n\n- Curated by: SHRIRANG MAHAJAN\n- Language(s) (NLP): ENGLISH\n- License: CC0 PUBLIC DOMAIN### Dataset Sources [optional]\n\n\n\n- Repository: URL## Uses### Direct Use### Out-of-Scope Use## Dataset Structure\n\n\ndataset_info:\n features:\n - name: {artist}\n dtype: {string}\n - name: {song}\n dtype: {string}\n - name: {link}\n dtype: {string} \n - name: {text} # Song Lyrics\n dtype: {string}## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.## Dataset Card Contact" ]
2962ed1b41b59d2d29624f12a88f50f161b37ce8
# Dataset Card for "tla_code_eval" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
aneeshas/tla_code_eval
[ "region:us" ]
2023-11-02T17:28:31+00:00
{"dataset_info": {"features": [{"name": "protocol", "dtype": "string"}, {"name": "prompt", "dtype": "string"}, {"name": "label", "dtype": "string"}], "splits": [{"name": "val", "num_bytes": 110431, "num_examples": 18}], "download_size": 47115, "dataset_size": 110431}, "configs": [{"config_name": "default", "data_files": [{"split": "val", "path": "data/val-*"}]}]}
2023-11-02T17:54:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "tla_code_eval" More Information needed
[ "# Dataset Card for \"tla_code_eval\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"tla_code_eval\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"tla_code_eval\"\n\nMore Information needed" ]
8e1a90aaeed64a0100e9c476f1842043cb563ea5
# gi_cutscn_new This repository contains cut-scene video files from a certain anime game (a.k.a. GI). ![Release Channel](https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fhuggingface.co%2Fdatasets%2FDeliberatorArchiver%2Fgi_cutscn_new%2Fresolve%2Fmain%2Fversion.json&query=%24.channel&label=Channel) ![Release Version](https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fhuggingface.co%2Fdatasets%2FDeliberatorArchiver%2Fgi_cutscn_new%2Fresolve%2Fmain%2Fversion.json&query=%24.version&label=Version) ![Official Version](https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fis.gd%2FQ2DQ02&query=%24.data.game.latest.version&label=Official%20Version) ## Disclaimer **This resource is released for educational or research purposes only. Copyrights and other rights to this resource belong to their respective copyright holders.** ## About All cut-scene video files are encoded using HLS streaming technology. ### Details The original files were extracted directly from the game. The original file contains one video track and four audio tracks (Chinese, English, Japanese, and Korean). The video track is encoded in VP9 and the audio track is encoded in CRI HCA. All video and audio tracks are packed in CRI USM. ### Details of encoded video files See [Gist](https://gist.github.com/daydreamer-json/8e0f2bf2025db209a9727ad4f2dd983a) for the parameters used during encoding. |Type|Level|Audio|Resolution|Codec|Color|Max Bitrate|Avg Bitrate| |---|---|---|---|---|---|---|---| |Video|L-5|L-2|1920 x 1080|VP9|YUV 4:2:0 8bit BT.601|Copy|Copy| |Video|L-4|L-1|1920 x 1080|HEVC|YUV 4:2:0 8bit BT.709|12800 kbps|9600 kbps| |Video|L-3|L-1|1280 x 720|HEVC|YUV 4:2:0 8bit BT.709|6400 kbps|4800 kbps| |Video|L-2|L-0|854 x 480|HEVC|YUV 4:2:0 8bit BT.709|3600 kbps|2400 kbps| |Video|L-1|L-0|640 x 360|HEVC|YUV 4:2:0 8bit BT.709|2400 kbps|1600 kbps| |Video|L-0|L-0|426 x 240|HEVC|YUV 4:2:0 8bit BT.709|1600 kbps|800 kbps| |Type|Level|Resolution|Codec|Max Bitrate|Avg Bitrate| |---|---|---|---|---|---| |Audio|L-2|16bit 48kHz 2ch|FLAC|1536 kbps|Lossless| |Audio|L-1|16bit 48kHz 2ch|AAC|? kbps|256 kbps| |Audio|L-0|16bit 48kHz 2ch|AAC|? kbps|128 kbps| ## How to watch To watch HLS (HTTP Live Streaming) media manifest files, you will need a player that supports HLS (MPEG-TS and fMP4) demux and can decode various video and audio formats. ### For iOS / iPadOS / macOS: You can open it from the **"Vidstack Player"** link. ### For Android / Windows / Linux: The best thing to do is to use a player that can play HLS, such as mpv player, mpv.net, or VLC. Use the **"Raw"** link to watch. Another way is to use hls.js to play HLS media files in browsers that support the Media Source Extensions API, such as Chrome, Firefox, and Edge. You can open it from the **"Vidstack Player"** link. ## Cut-scene Video Links ### See [Links page](lists.md)
DeliberatorArchiver/gi_cutscn_new
[ "language:zh", "language:en", "language:ja", "language:ko", "license:cc-by-nc-nd-4.0", "region:us" ]
2023-11-02T17:31:50+00:00
{"language": ["zh", "en", "ja", "ko"], "license": "cc-by-nc-nd-4.0", "viewer": false}
2024-01-15T13:04:36+00:00
[]
[ "zh", "en", "ja", "ko" ]
TAGS #language-Chinese #language-English #language-Japanese #language-Korean #license-cc-by-nc-nd-4.0 #region-us
gi\_cutscn\_new =============== This repository contains cut-scene video files from a certain anime game (a.k.a. GI). !Release Channel !Release Version !Official Version Disclaimer ---------- This resource is released for educational or research purposes only. Copyrights and other rights to this resource belong to their respective copyright holders. About ----- All cut-scene video files are encoded using HLS streaming technology. ### Details The original files were extracted directly from the game. The original file contains one video track and four audio tracks (Chinese, English, Japanese, and Korean). The video track is encoded in VP9 and the audio track is encoded in CRI HCA. All video and audio tracks are packed in CRI USM. ### Details of encoded video files See Gist for the parameters used during encoding. How to watch ------------ To watch HLS (HTTP Live Streaming) media manifest files, you will need a player that supports HLS (MPEG-TS and fMP4) demux and can decode various video and audio formats. ### For iOS / iPadOS / macOS: You can open it from the "Vidstack Player" link. ### For Android / Windows / Linux: The best thing to do is to use a player that can play HLS, such as mpv player, URL, or VLC. Use the "Raw" link to watch. Another way is to use URL to play HLS media files in browsers that support the Media Source Extensions API, such as Chrome, Firefox, and Edge. You can open it from the "Vidstack Player" link. Cut-scene Video Links --------------------- ### See Links page
[ "### Details\n\n\nThe original files were extracted directly from the game.\n\n\nThe original file contains one video track and four audio tracks (Chinese, English, Japanese, and Korean). The video track is encoded in VP9 and the audio track is encoded in CRI HCA. All video and audio tracks are packed in CRI USM.", "### Details of encoded video files\n\n\nSee Gist for the parameters used during encoding.\n\n\n\n\nHow to watch\n------------\n\n\nTo watch HLS (HTTP Live Streaming) media manifest files, you will need a player that supports HLS (MPEG-TS and fMP4) demux and can decode various video and audio formats.", "### For iOS / iPadOS / macOS:\n\n\nYou can open it from the \"Vidstack Player\" link.", "### For Android / Windows / Linux:\n\n\nThe best thing to do is to use a player that can play HLS, such as mpv player, URL, or VLC. Use the \"Raw\" link to watch.\n\n\nAnother way is to use URL to play HLS media files in browsers that support the Media Source Extensions API, such as Chrome, Firefox, and Edge. You can open it from the \"Vidstack Player\" link.\n\n\nCut-scene Video Links\n---------------------", "### See Links page" ]
[ "TAGS\n#language-Chinese #language-English #language-Japanese #language-Korean #license-cc-by-nc-nd-4.0 #region-us \n", "### Details\n\n\nThe original files were extracted directly from the game.\n\n\nThe original file contains one video track and four audio tracks (Chinese, English, Japanese, and Korean). The video track is encoded in VP9 and the audio track is encoded in CRI HCA. All video and audio tracks are packed in CRI USM.", "### Details of encoded video files\n\n\nSee Gist for the parameters used during encoding.\n\n\n\n\nHow to watch\n------------\n\n\nTo watch HLS (HTTP Live Streaming) media manifest files, you will need a player that supports HLS (MPEG-TS and fMP4) demux and can decode various video and audio formats.", "### For iOS / iPadOS / macOS:\n\n\nYou can open it from the \"Vidstack Player\" link.", "### For Android / Windows / Linux:\n\n\nThe best thing to do is to use a player that can play HLS, such as mpv player, URL, or VLC. Use the \"Raw\" link to watch.\n\n\nAnother way is to use URL to play HLS media files in browsers that support the Media Source Extensions API, such as Chrome, Firefox, and Edge. You can open it from the \"Vidstack Player\" link.\n\n\nCut-scene Video Links\n---------------------", "### See Links page" ]
[ 39, 78, 73, 26, 105, 5 ]
[ "passage: TAGS\n#language-Chinese #language-English #language-Japanese #language-Korean #license-cc-by-nc-nd-4.0 #region-us \n### Details\n\n\nThe original files were extracted directly from the game.\n\n\nThe original file contains one video track and four audio tracks (Chinese, English, Japanese, and Korean). The video track is encoded in VP9 and the audio track is encoded in CRI HCA. All video and audio tracks are packed in CRI USM.### Details of encoded video files\n\n\nSee Gist for the parameters used during encoding.\n\n\n\n\nHow to watch\n------------\n\n\nTo watch HLS (HTTP Live Streaming) media manifest files, you will need a player that supports HLS (MPEG-TS and fMP4) demux and can decode various video and audio formats.### For iOS / iPadOS / macOS:\n\n\nYou can open it from the \"Vidstack Player\" link.### For Android / Windows / Linux:\n\n\nThe best thing to do is to use a player that can play HLS, such as mpv player, URL, or VLC. Use the \"Raw\" link to watch.\n\n\nAnother way is to use URL to play HLS media files in browsers that support the Media Source Extensions API, such as Chrome, Firefox, and Edge. You can open it from the \"Vidstack Player\" link.\n\n\nCut-scene Video Links\n---------------------### See Links page" ]
f58df92c8f962958ab43496d5592598bace74afa
<div align="center">
<!-- <img src="https://big-cheng.com/k2/k2.png" alt="k2-logo" width="200"/> -->
<h2>📈 CFBenchmark: Chinese Financial Assistant with Large Language Model</h2>
</div>

<div align="center" style="display: flex; justify-content: center">
<a href='https://arxiv.org/abs/2311.05812' style="margin-left:15px"><img src='https://img.shields.io/badge/Paper-ArXiv-C71585'></a>
<a href='https://huggingface.co/datasets/TongjiFinLab/CFBenchmark' style="margin-left:15px"><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging Face-CFBenchmark-red'></a>
<a href='' style="margin-left:15px"><img src='https://img.shields.io/badge/License-Apache--2.0-blue.svg'></a>
</div>

English | [简体中文](README-CN.md)

# Introduction

Welcome to **CFBenchmark**!

In recent years, with the rapid development of Large Language Models (LLMs), existing LLMs have achieved outstanding performance in various tasks. However, we notice that there is currently only a limited number of benchmarks focused on assessing the performance of LLMs in specific domains.

In this work, we introduce CFBenchmark, a Chinese financial assistant benchmark for large language models. The basic version of CFBenchmark includes 3917 financial texts spanning three aspects and eight tasks. The CFBenchmark is organized into three aspects: financial recognition, financial classification, and financial generation. We conduct experiments in zero-shot and few-shot modes on our CFBenchmark to evaluate renowned LLMs available in the market. The results reveal that there is still significant room to improve LLMs on basic financial text-processing tasks.

In the future, we will continue to contribute more benchmark evaluations in this series.

<div align="center">
  <img src="imgs/Framework.png" width="100%"/>
  <br />
  <br /></div>

# Contents

- [CFBenchmark-Basic](#cfbenchmark-basic)
- [QuickStart](#QuickStart)
- [Performance of Existing LLMs](#performance-of-existing-llms)
- [Acknowledgements](#acknowledgements)
- [To-Do](#to-do)
- [License](#license)
- [Citation](#citation)

# CFBenchmark-Basic

CFBenchmark-Basic includes 3917 financial texts spanning three aspects and eight tasks, organized around three aspects: financial recognition, financial classification, and financial generation.

- Recognition-Company: Recognize the company names associated with financial documents (273).
- Recognition-Product: Recognize the product names associated with financial documents (297).
- Classification-Sentiment: Classify the sentiment associated with financial documents (591).
- Classification-Event: Classify the event categories associated with financial documents (577).
- Classification-Industry: Classify the industry categories associated with financial documents (402).
- Generation-Suggestion: Generate investment suggestions based on the provided financial document (593).
- Generation-Risk: Generate risk alerts based on the provided financial document (591).
- Generation-Summary: Generate a content summary based on the provided financial document (593).

We provide two examples to show how the few-shot and zero-shot settings work during evaluation.

Example 1 Few-shot Input:
<div align="center">
  <img src="imgs/fewshot.png" width="100%"/>
  <br />
  <br /></div>

Example 2 Zero-shot Input:
<div align="center">
  <img src="imgs/zeroshot.png" width="100%"/>
  <br />
  <br /></div>

## QuickStart

### Installation

Below are the steps for quick installation.
```bash
conda create --name CFBenchmark python=3.10
conda activate CFBenchmark
```

```bash
git clone https://github.com/TongjiFinLab/CFBenchmark
cd CFBenchmark
pip install -r requirements.txt
```

### Dataset Preparation

Download the dataset from the Hugging Face hub, either through the `datasets` library or manually. For a manual download, run the following commands in the CFBenchmark project directory to place the data in the CFBenchmark/CFBenchmark directory.

```text
wget https://huggingface.co/TongjiFinLab/CFBenchmark
unzip CFBenchmark.zip
```

### Evaluation

We have prepared the testing and evaluation code for you in the ```codes``` directory.

To begin the evaluation, you can run the following from the command line:

```cmd
cd CFBenchmark/codes
python run.py
```

You can edit ```codes/run.py``` to modify the parameters so that the paths match your environment.

```py
from CFBenchmark import CFBenchmark

if __name__=='__main__':

    # EXPERIMENT SETUP
    modelname = 'YOUR-MODEL-NAME'
    model_type= 'NORMAL' #NORMAL or LoRA
    model_path= 'YOUR-MODEL-PATH'
    peft_model_path= ''#PASS YOUR OWN PATH OF PEFT MODEL IF NEEDED
    fewshot_text_path= '../fewshot'#DEFAULT PATH
    test_type='few-shot'#LET'S TAKE THE FEW-SHOT TEST AS AN EXAMPLE
    response_path='../cfbenchmark-response'#PATH TO RESERVE THE RESPONSE OF YOUR MODEL
    scores_path='../cfbenchmark-scores' #PATH TO RESERVE THE SCORE OF YOUR MODEL
    embedding_model_path='../bge-zh-v1.5' #PASS YOUR OWN PATH OF BGE-ZH-V1.5
    benchmark_path='../cfbenchmark' #DEFAULT PATH
    data_source_type='offline'#online or offline

    #generate Class CFBenchmark
    cfb=CFBenchmark(
        model_name=modelname,
        model_type=model_type,
        model_path=model_path,
        peft_model_path=peft_model_path,
        fewshot_text_path=fewshot_text_path,
        test_type=test_type,
        response_path=response_path,
        scores_path=scores_path,
        embedding_model_path=embedding_model_path,
        benchmark_path=benchmark_path,
        data_source_type=data_source_type
    )

    cfb.generate_model()# TO GET RESPONSE FROM YOUR MODEL
    cfb.get_test_scores()# TO GET YOUR MODEL SCORES FROM RESPONSE
```

We defined a class ```CFBenchmark``` to do the evaluation.

```py
class CFBenchmark:
    def __init__(self,
                 model_name,
                 model_type,
                 model_path,
                 peft_model_path,
                 fewshot_text_path,
                 test_type,
                 response_path,
                 scores_path,
                 embedding_model_path,
                 benchmark_path,
                 data_source_type
                 ) -> None:
```

* You can use the arguments to set paths for models. If you want to use a LoRA fine-tuned model, set ```model_type``` to ```LoRA``` and pass your PEFT model path through ```peft_model_path```.
* You can set ```test_type``` to 'zero-shot' or 'few-shot' to do different evaluations.
* ```embedding_model_path``` is set to the path of bge-zh-v1.5 for calculating cosine similarity.
* You can modify the hyperparameters in ```CFBenchmark.generate_model()``` for text generation.
* We provide CFBenchmark saved as a Dataset data type on both Hugging Face and GitHub. If you want to use an offline version of the benchmark, set the parameter ```data_source_type``` to ```offline```. If you want to use the online version, set ```data_source_type``` to ```online```.

## Performance of Existing LLMs

We utilize two types of metrics to evaluate the performance of LLMs in the financial domain on our CFBenchmark.

For recognition and classification tasks, we employ the **F1 score** as the evaluation metric, which balances precision and recall.

For the generation tasks, we utilize **cosine similarity** between the vector representations of the ground truth and the generated answer to measure the generation ability.
Since there are usually different expressions with similar meanings in our generation tasks, simply employing a ROUGE score or BLEU score is not reasonable. Specifically, **bge-zh-v1.5** is assigned as the oracle model to generate the sentence embeddings. We calculate evaluation scores for each sub-task individually and provide the average score for each category.

The best scores of the LLMs (considering both zero-shot and few-shot settings), as well as those of our model, are shown below:

| Model | Size | Company | Product | R.Avg | Sector | Event | Sentiment | C.Avg | Summary | Risk | Suggestion | G.Avg | Avg |
| ------------------ | ---- | --------- | --------- | --------- | --------- | --------- | --------- | --------- | --------- | --------- | ---------- | --------- | --------- |
| HUMAN | - | 0.931 | 0.744 | 0.838 | 0.975 | 0.939 | 0.912 | 0.942 | 1.000 | 1.000 | 1.000 | 1.000 | 0.927 |
| ChatGPT | 20B | 0.797 | 0.198 | 0.498 | 0.453 | 0.458 | 0.425 | 0.455 | 0.593 | 0.541 | 0.771 | 0.635 | 0.529 |
| ERNIE-Bot | 260B | 0.807 | 0.300 | 0.533 | 0.408 | 0.350 | 0.186 | 0.315 | 0.715 | 0.590 | 0.716 | 0.673 | 0.507 |
| ERNIE-Bot-4 | - | 0.819 | 0.417 | 0.618 | 0.418 | 0.358 | 0.375 | 0.384 | 0.721 | 0.629 | 0.718 | 0.689 | 0.564 |
| Falcon-7B | 7B | 0.671 | 0.168 | 0.420 | 0.169 | 0.132 | 0.250 | 0.184 | 0.302 | 0.301 | 0.246 | 0.283 | 0.296 |
| Falcon-7B-chat | 7B | 0.582 | 0.046 | 0.314 | 0.112 | 0.142 | 0.153 | 0.135 | 0.307 | 0.299 | 0.258 | 0.288 | 0.246 |
| bloomz-7B1 | 7B | 0.765 | 0.166 | 0.465 | 0.252 | 0.154 | 0.394 | 0.267 | 0.451 | 0.371 | 0.462 | 0.428 | 0.387 |
| bloomz-7Bt1-mt | 7B | 0.751 | 0.157 | 0.454 | 0.087 | 0.182 | 0.380 | 0.216 | 0.425 | 0.379 | 0.396 | 0.400 | 0.357 |
| Qwen-7B | 7B | 0.780 | 0.357 | 0.569 | 0.480 | 0.335 | 0.379 | 0.398 | 0.750 | 0.505 | 0.713 | 0.656 | 0.541 |
| Qwen-Chat-7B | 7B | 0.763 | 0.360 | 0.562 | 0.400 | 0.367 | 0.265 | 0.344 | 0.548 | 0.307 | 0.379 | 0.411 | 0.439 |
| Qwen-14B | 14B | 0.805 | 0.421 | 0.613 | 0.481 | 0.350 | 0.385 | 0.405 | 0.754 | 0.608 | 0.717 | 0.693 | 0.570 |
| Qwen-Chat-14B | 14B | 0.814 | 0.442 | 0.628 | 0.382 | 0.400 | 0.350 | 0.377 | 0.732 | 0.478 | 0.736 | 0.649 | 0.551 |
| ChatGLM2-6B | 6B | 0.747 | 0.313 | 0.530 | 0.285 | 0.300 | 0.357 | 0.314 | 0.657 | 0.454 | 0.671 | 0.594 | 0.479 |
| Baichuan2-7B-Base | 7B | 0.672 | 0.340 | 0.506 | 0.342 | 0.490 | 0.480 | 0.437 | 0.739 | 0.619 | 0.751 | 0.703 | 0.549 |
| Baichuan2-7B-Chat | 7B | 0.757 | 0.402 | 0.579 | 0.425 | 0.475 | 0.323 | 0.408 | 0.725 | 0.648 | 0.732 | 0.702 | 0.563 |
| Baichuan2-13B-Base | 13B | 0.781 | 0.330 | 0.555 | 0.436 | 0.496 | 0.477 | 0.470 | 0.725 | 0.503 | 0.747 | 0.658 | 0.561 |
| Baichuan2-13B-Chat | 13B | 0.797 | 0.314 | 0.556 | 0.472 | 0.507 | 0.387 | 0.455 | 0.739 | 0.634 | 0.746 | 0.706 | 0.572 |
| InternLM-7B | 7B | 0.612 | 0.233 | 0.423 | 0.266 | 0.311 | 0.328 | 0.302 | 0.378 | 0.336 | 0.379 | 0.364 | 0.363 |
| InternLM-7B-Chat | 7B | 0.632 | 0.261 | 0.447 | 0.272 | 0.364 | 0.399 | 0.345 | 0.363 | 0.270 | 0.353 | 0.329 | 0.374 |
| InternLM-20B | 20B | 0.809 | 0.358 | 0.583 | 0.500 | 0.427 | 0.417 | 0.448 | 0.706 | 0.653 | 0.728 | 0.695 | 0.575 |
| InternLM-20B-Chat | 20B | 0.488 | 0.362 | 0.425 | 0.323 | 0.327 | 0.370 | 0.340 | 0.706 | 0.578 | 0.762 | 0.662 | 0.476 |
| CFGPT1-sft-LoRA | 7B | 0.820 | 0.414 | 0.617 | 0.569 | 0.729 | 0.769 | 0.689 | 0.745 | 0.584 | 0.609 | 0.646 | 0.650 |
| CFGPT1-sft-Full | 7B | **0.836** | **0.476** | **0.656** | **0.700** | **0.808** | **0.829** | **0.779** | **0.798** | **0.669** | **0.808** | **0.758** | **0.731** |

# Acknowledgements

CFBenchmark has referred to the following open-source projects. We want to express our gratitude and respect to the researchers of these projects.

- tiiuae/falcon LLM series (https://huggingface.co/tiiuae/falcon-7b)
- bigscience/bloomz LLM series (https://huggingface.co/bigscience/bloomz-7b1)
- QwenLM/Qwen LLM series (https://github.com/QwenLM/Qwen)
- THUDM/ChatGLM2-6b (https://github.com/THUDM/ChatGLM2-6B)
- baichuan-inc/Baichuan2 LLM series (https://github.com/baichuan-inc/Baichuan2)
- InternLM/InternLM LLM series (https://github.com/InternLM/InternLM)
- ssymmetry/BBT-FinCUGE-Applications (https://github.com/ssymmetry/BBT-FinCUGE-Applications)
- chancefocus/PIXIU (https://github.com/chancefocus/PIXIU)
- SUFE-AIFLM-Lab/FinEval (https://github.com/SUFE-AIFLM-Lab/FinEval)

# To-Do

- CFBenchmark-Advanced:
  - In various scenarios of Chinese financial usage, propose more evaluation tasks to enrich the CFBenchmark series.

# License

CFBenchmark is a research preview intended for non-commercial use only, subject to the Terms of Use of the data generated by OpenAI. Please contact us if you find any potential violations. The code is released under the Apache License 2.0.

# Citation

```bibtex
@misc{lei2023cfbenchmark,
      title={{CFBenchmark}: Chinese Financial Assistant Benchmark for Large Language Model},
      author={Lei, Yang and Li, Jiangtong and Jiang, Ming and Hu, Junjie and Cheng, Dawei and Ding, Zhijun and Jiang, Changjun},
      year={2023},
      eprint={2311.05812},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```
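The cosine-similarity metric used for the generation tasks can be sketched in a few lines. This hedged example uses sentence-transformers for brevity instead of the benchmark's own CFBenchmark class; the checkpoint id `BAAI/bge-base-zh-v1.5` and the two sentences are assumptions for illustration.

```python
# Minimal sketch: embedding-based cosine similarity between gold and prediction.
from sentence_transformers import SentenceTransformer, util

embedder = SentenceTransformer("BAAI/bge-base-zh-v1.5")  # assumed bge-zh-v1.5 checkpoint
gold = "公司上半年净利润同比增长,资产质量有所改善。"
pred = "报告显示该银行净利润增长,不良率下降,资产质量改善。"

emb = embedder.encode([gold, pred], normalize_embeddings=True)
score = util.cos_sim(emb[0], emb[1]).item()
print(f"cosine similarity: {score:.3f}")
```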
TongjiFinLab/CFBenchmark
[ "task_categories:text-classification", "task_categories:summarization", "task_categories:text-generation", "size_categories:1K<n<10K", "language:en", "license:apache-2.0", "arxiv:2311.05812", "region:us" ]
2023-11-02T17:32:08+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["1K<n<10K"], "task_categories": ["text-classification", "summarization", "text-generation"]}
2023-12-16T14:36:22+00:00
[ "2311.05812" ]
[ "en" ]
TAGS #task_categories-text-classification #task_categories-summarization #task_categories-text-generation #size_categories-1K<n<10K #language-English #license-apache-2.0 #arxiv-2311.05812 #region-us
<div align="center"> <h2> CFBenchmark: Chinese Financial Assistant with Large Language Model</h2> </div> <div align="center" style="display: flex; justify-content: center"> <a href='URL style="margin-left:15px"><img src='URL <a href='URL style="margin-left:15px"><img src='URL Face-CFBenchmark-red'></a> <a href='' style="margin-left:15px"><img src='URL </div> English | 简体中文 # Introduction Welcome to CFBenchmark In recent years, with the rapid development of Large Language Models~(LLMs), outstanding performance has been achieved in various tasks by existing LLMs. However, we notice that there is currently a limited amount of benchmarks focused on assessing the performance of LLMs in specific domains. In this work, we introduce CFBenchmark, a Chinese financial assistant benchmark for large language models. The basic version of CFBenchmark includes 3917 financial texts spanning three aspects and eight tasks. The CFBenchmark is organized into three aspects, financial recognition, financial classification, and financial generation. We conduct experiments in zero-shot and few-shot mode on our CFBenchmark to evaluate renowned LLMs available in the market. The results have revealed that there is still significant room to improve LLMs in basic tasks of financial text processing. In the future, we will continue to contribute more benchmark evaluations in this series. <div align="center"> <img src="imgs/URL" width="100%"/> <br /> <br /></div> # Contents - CFBenchmark-Basic - QuickStart - Performance of Existing LLMs - Acknowledgements - To-Do - License - Citation # CFBenchmark-Basic CFBenchmark-Basic includes 3917 financial texts spanning three aspects and eight tasks, organized from three aspects, financial recognition, financial classification, and financial generation. - Recognition-Company: Recognize the company names associated with financial documents (273). - Recognition-Product: Recognize the product names associated with financial documents (297). - Classification-Sentiment: Classify the sentiment associated with financial documents (591). - Classification-Event: Classify the event categories associated with financial documents (577). - Classification-Industry: Classify the industry categories associated with financial documents (402). - Generation-Suggestion: Generate investment suggestions based on the provided financial document (593). - Generation-Risk: Generate risk alerts based on the provided financial document (591). - Generation-Summary: Generate a content summary based on the provided financial document (593). We provide two examples to reveal how the few-shot setting and zero-shot setting work during evaluation. Example 1 Fewshot Input: <div align="center"> <img src="imgs/URL" width="100%"/> <br /> <br /></div> Example 2 Zeroshot Input: <div align="center"> <img src="imgs/URL" width="100%"/> <br /> <br /></div> ## QuickStart ### Installation Below are the steps for quick installation. ### Dataset Preparation Download the dataset utilizing the Hugging Face dataset. Run the command Manual download and unzip it. Run the following command in the CFBenchmark project directory to prepare the data set in the CFBenchmark/CFBenchmark directory. ### Evaluation We have prepared the testing and evaluation codes for you in repo . To begin the evaluation, you can run the following code from the command line: You can enter to modify the parameters in it to make the code running path meet your requirements. We defined a class to do the evaluation. 
* You can use the arguments to set paths for models. If you want to use a LoRA fine-tuned model, set 'model_type' to 'LoRA' and pass your peft model path through 'peft_model_path'. If you want to use the online version of the benchmark, set the parameter 'data_source_type' to 'online'.

@misc{lei2023cfbenchmark,
 title={{CFBenchmark}: Chinese Financial Assistant Benchmark for Large Language Model}, 
 author={Lei, Yang and Li, Jiangtong and Jiang, Ming and Hu, Junjie and Cheng, Dawei and Ding, Zhijun and Jiang, Changjun},
 year={2023},
 eprint={2311.05812},
 archivePrefix={arXiv},
 primaryClass={cs.CL}
}
[ "# Introduction\n\nWelcome to CFBenchmark\n\nIn recent years, with the rapid development of Large Language Models~(LLMs), outstanding performance has been achieved in various tasks by existing LLMs. However, we notice that there is currently a limited amount of benchmarks focused on assessing the performance of LLMs in specific domains. \n\nIn this work, we introduce CFBenchmark, a Chinese financial assistant benchmark for large language models. The basic version of CFBenchmark includes 3917 financial texts spanning three aspects and eight tasks. The CFBenchmark is organized into three aspects, financial recognition, financial classification, and financial generation. We conduct experiments in zero-shot and few-shot mode on our CFBenchmark to evaluate renowned LLMs available in the market. The results have revealed that there is still significant room to improve LLMs in basic tasks of financial text processing.\n\nIn the future, we will continue to contribute more benchmark evaluations in this series.\n\n<div align=\"center\">\n <img src=\"imgs/URL\" width=\"100%\"/>\n <br />\n <br /></div>", "# Contents\n\n- CFBenchmark-Basic\n - QuickStart\n - Performance of Existing LLMs\n- Acknowledgements\n- To-Do\n- License\n- Citation", "# CFBenchmark-Basic\n\nCFBenchmark-Basic includes 3917 financial texts spanning three aspects and eight tasks, organized from three aspects, financial recognition, financial classification, and financial generation.\n\n- Recognition-Company: Recognize the company names associated with financial documents (273).\n- Recognition-Product: Recognize the product names associated with financial documents (297).\n- Classification-Sentiment: Classify the sentiment associated with financial documents (591).\n- Classification-Event: Classify the event categories associated with financial documents (577).\n- Classification-Industry: Classify the industry categories associated with financial documents (402).\n- Generation-Suggestion: Generate investment suggestions based on the provided financial document (593).\n- Generation-Risk: Generate risk alerts based on the provided financial document (591).\n- Generation-Summary: Generate a content summary based on the provided financial document (593).\n\nWe provide two examples to reveal how the few-shot setting and zero-shot setting work during evaluation.\n\nExample 1 Fewshot Input:\n<div align=\"center\">\n <img src=\"imgs/URL\" width=\"100%\"/>\n <br />\n <br /></div>\n\nExample 2 Zeroshot Input:\n<div align=\"center\">\n <img src=\"imgs/URL\" width=\"100%\"/>\n <br />\n <br /></div>", "## QuickStart", "### Installation\n\nBelow are the steps for quick installation.", "### Dataset Preparation\n\nDownload the dataset utilizing the Hugging Face dataset. Run the command Manual download and unzip it. Run the following command in the CFBenchmark project directory to prepare the data set in the CFBenchmark/CFBenchmark directory.", "### Evaluation\n\nWe have prepared the testing and evaluation codes for you in repo . \n\nTo begin the evaluation, you can run the following code from the command line:\n\nYou can enter to modify the parameters in it to make the code running path meet your requirements.\n\n\nWe defined a class to do the evaluation. \n\n\n\n* You can use the arguments to set paths for models. If you want to use a LoRA fine-tuned model, set model_type'' toLoRAand pass your peft model path throughpeft_model_path'. 
If you want to use the online version of the benchmark, set the parameter 'data_source_type' to 'online'.\n\n@misc{lei2023cfbenchmark,\n title={{CFBenchmark}: Chinese Financial Assistant Benchmark for Large Language Model}, \n author={Lei, Yang and Li, Jiangtong and Jiang, Ming and Hu, Junjie and Cheng, Dawei and Ding, Zhijun and Jiang, Changjun},\n year={2023},\n eprint={2311.05812},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}" ]
[ "TAGS\n#task_categories-text-classification #task_categories-summarization #task_categories-text-generation #size_categories-1K<n<10K #language-English #license-apache-2.0 #arxiv-2311.05812 #region-us \n", "# Introduction\n\nWelcome to CFBenchmark\n\nIn recent years, with the rapid development of Large Language Models~(LLMs), outstanding performance has been achieved in various tasks by existing LLMs. However, we notice that there is currently a limited amount of benchmarks focused on assessing the performance of LLMs in specific domains. \n\nIn this work, we introduce CFBenchmark, a Chinese financial assistant benchmark for large language models. The basic version of CFBenchmark includes 3917 financial texts spanning three aspects and eight tasks. The CFBenchmark is organized into three aspects, financial recognition, financial classification, and financial generation. We conduct experiments in zero-shot and few-shot mode on our CFBenchmark to evaluate renowned LLMs available in the market. The results have revealed that there is still significant room to improve LLMs in basic tasks of financial text processing.\n\nIn the future, we will continue to contribute more benchmark evaluations in this series.\n\n<div align=\"center\">\n <img src=\"imgs/URL\" width=\"100%\"/>\n <br />\n <br /></div>", "# Contents\n\n- CFBenchmark-Basic\n - QuickStart\n - Performance of Existing LLMs\n- Acknowledgements\n- To-Do\n- License\n- Citation", "# CFBenchmark-Basic\n\nCFBenchmark-Basic includes 3917 financial texts spanning three aspects and eight tasks, organized from three aspects, financial recognition, financial classification, and financial generation.\n\n- Recognition-Company: Recognize the company names associated with financial documents (273).\n- Recognition-Product: Recognize the product names associated with financial documents (297).\n- Classification-Sentiment: Classify the sentiment associated with financial documents (591).\n- Classification-Event: Classify the event categories associated with financial documents (577).\n- Classification-Industry: Classify the industry categories associated with financial documents (402).\n- Generation-Suggestion: Generate investment suggestions based on the provided financial document (593).\n- Generation-Risk: Generate risk alerts based on the provided financial document (591).\n- Generation-Summary: Generate a content summary based on the provided financial document (593).\n\nWe provide two examples to reveal how the few-shot setting and zero-shot setting work during evaluation.\n\nExample 1 Fewshot Input:\n<div align=\"center\">\n <img src=\"imgs/URL\" width=\"100%\"/>\n <br />\n <br /></div>\n\nExample 2 Zeroshot Input:\n<div align=\"center\">\n <img src=\"imgs/URL\" width=\"100%\"/>\n <br />\n <br /></div>", "## QuickStart", "### Installation\n\nBelow are the steps for quick installation.", "### Dataset Preparation\n\nDownload the dataset utilizing the Hugging Face dataset. Run the command Manual download and unzip it. Run the following command in the CFBenchmark project directory to prepare the data set in the CFBenchmark/CFBenchmark directory.", "### Evaluation\n\nWe have prepared the testing and evaluation codes for you in repo . \n\nTo begin the evaluation, you can run the following code from the command line:\n\nYou can enter to modify the parameters in it to make the code running path meet your requirements.\n\n\nWe defined a class to do the evaluation. \n\n\n\n* You can use the arguments to set paths for models. 
If you want to use a LoRA fine-tuned model, set 'model_type' to 'LoRA' and pass your peft model path through 'peft_model_path'. If you want to use the online version of the benchmark, set the parameter 'data_source_type' to 'online'.\n\n@misc{lei2023cfbenchmark,\n title={{CFBenchmark}: Chinese Financial Assistant Benchmark for Large Language Model}, \n author={Lei, Yang and Li, Jiangtong and Jiang, Ming and Hu, Junjie and Cheng, Dawei and Ding, Zhijun and Jiang, Changjun},\n year={2023},\n eprint={2311.05812},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}" ]
[ 71, 248, 37, 315, 3, 12, 61, 259 ]
[ "passage: TAGS\n#task_categories-text-classification #task_categories-summarization #task_categories-text-generation #size_categories-1K<n<10K #language-English #license-apache-2.0 #arxiv-2311.05812 #region-us \n# Introduction\n\nWelcome to CFBenchmark\n\nIn recent years, with the rapid development of Large Language Models~(LLMs), outstanding performance has been achieved in various tasks by existing LLMs. However, we notice that there is currently a limited amount of benchmarks focused on assessing the performance of LLMs in specific domains. \n\nIn this work, we introduce CFBenchmark, a Chinese financial assistant benchmark for large language models. The basic version of CFBenchmark includes 3917 financial texts spanning three aspects and eight tasks. The CFBenchmark is organized into three aspects, financial recognition, financial classification, and financial generation. We conduct experiments in zero-shot and few-shot mode on our CFBenchmark to evaluate renowned LLMs available in the market. The results have revealed that there is still significant room to improve LLMs in basic tasks of financial text processing.\n\nIn the future, we will continue to contribute more benchmark evaluations in this series.\n\n<div align=\"center\">\n <img src=\"imgs/URL\" width=\"100%\"/>\n <br />\n <br /></div># Contents\n\n- CFBenchmark-Basic\n - QuickStart\n - Performance of Existing LLMs\n- Acknowledgements\n- To-Do\n- License\n- Citation" ]
98355e3b92afb9f92072cbd91dba766f68a70d9a
This repository contains the English '[SemEval-2014 Task 4: Aspect Based Sentiment Analysis](https://aclanthology.org/S14-2004/)' dataset, translated with DeepL into Spanish, French, Russian, and Turkish. The **labels have been manually projected**. For more details, read this paper: [Model and Data Transfer for Cross-Lingual Sequence Labelling in Zero-Resource Settings](https://arxiv.org/abs/2210.12623).

**Intended Usage**: Since the datasets are parallel across languages, they are ideal for evaluating annotation projection algorithms, such as [T-Projection](https://arxiv.org/abs/2212.10548).

# Label Dictionary

```python
{
    "O": 0,
    "B-TARGET": 1,
    "I-TARGET": 2
}
```

# Citation

If you use this data, please cite the following papers:

```bibtex
@inproceedings{garcia-ferrero-etal-2022-model,
    title = "Model and Data Transfer for Cross-Lingual Sequence Labelling in Zero-Resource Settings",
    author = "Garc{\'\i}a-Ferrero, Iker and Agerri, Rodrigo and Rigau, German",
    editor = "Goldberg, Yoav and Kozareva, Zornitsa and Zhang, Yue",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
    month = dec,
    year = "2022",
    address = "Abu Dhabi, United Arab Emirates",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.findings-emnlp.478",
    doi = "10.18653/v1/2022.findings-emnlp.478",
    pages = "6403--6416",
    abstract = "Zero-resource cross-lingual transfer approaches aim to apply supervised models from a source language to unlabelled target languages. In this paper we perform an in-depth study of the two main techniques employed so far for cross-lingual zero-resource sequence labelling, based either on data or model transfer. Although previous research has proposed translation and annotation projection (data-based cross-lingual transfer) as an effective technique for cross-lingual sequence labelling, in this paper we experimentally demonstrate that high-capacity multilingual language models applied in a zero-shot (model-based cross-lingual transfer) setting consistently outperform data-based cross-lingual transfer approaches. A detailed analysis of our results suggests that this might be due to important differences in language use. More specifically, machine translation often generates a textual signal which is different to what the models are exposed to when using gold standard data, which affects both the fine-tuning and evaluation processes. Our results also indicate that data-based cross-lingual transfer approaches remain a competitive option when high-capacity multilingual language models are not available.",
}

@inproceedings{pontiki-etal-2014-semeval,
    title = "{S}em{E}val-2014 Task 4: Aspect Based Sentiment Analysis",
    author = "Pontiki, Maria and Galanis, Dimitris and Pavlopoulos, John and Papageorgiou, Harris and Androutsopoulos, Ion and Manandhar, Suresh",
    editor = "Nakov, Preslav and Zesch, Torsten",
    booktitle = "Proceedings of the 8th International Workshop on Semantic Evaluation ({S}em{E}val 2014)",
    month = aug,
    year = "2014",
    address = "Dublin, Ireland",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/S14-2004",
    doi = "10.3115/v1/S14-2004",
    pages = "27--35",
}
```
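As a usage sketch, the language configurations listed in the metadata (en, es, fr, ru, tr) can be loaded individually and the integer labels decoded with the dictionary above. This is a hypothetical example: the token and tag column names are assumptions on our part, so inspect a record before relying on them.

```python
# Hedged sketch: load one language configuration and decode BIO labels.
# Assumption: the token-classification columns are named "tokens" and
# "ner_tags"; the card does not say, so check an actual record first.
from datasets import load_dataset

id2label = {0: "O", 1: "B-TARGET", 2: "I-TARGET"}

ds = load_dataset("HiTZ/Multilingual-Opinion-Target-Extraction", "es", split="train")
print(ds[0])  # inspect the real field names

# tags = [id2label[i] for i in ds[0]["ner_tags"]]  # assuming a "ner_tags" column
```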
HiTZ/Multilingual-Opinion-Target-Extraction
[ "task_categories:token-classification", "size_categories:1K<n<10K", "language:en", "language:fr", "language:es", "language:ru", "language:tr", "license:apache-2.0", "opinion", "target", "absa", "aspect", "sentiment analysis", "arxiv:2210.12623", "arxiv:2212.10548", "region:us" ]
2023-11-02T17:41:40+00:00
{"language": ["en", "fr", "es", "ru", "tr"], "license": "apache-2.0", "size_categories": ["1K<n<10K"], "task_categories": ["token-classification"], "paperswithcode_id": "aspect-based-sentiment-analysis", "pretty_name": "Multilingual Opinion Target Extraction", "arxiv": 2210.12623, "configs": [{"config_name": "en", "data_files": [{"split": "train", "path": "en.ote.train.json"}, {"split": "test", "path": "en.ote.test.json"}]}, {"config_name": "es", "data_files": [{"split": "train", "path": "es.ote.train.json"}, {"split": "test", "path": "es.ote.test.json"}]}, {"config_name": "fr", "data_files": [{"split": "train", "path": "fr.ote.train.json"}, {"split": "test", "path": "fr.ote.test.json"}]}, {"config_name": "ru", "data_files": [{"split": "train", "path": "ru.ote.train.json"}, {"split": "test", "path": "ru.ote.test.json"}]}, {"config_name": "tr", "data_files": [{"split": "train", "path": "tr.ote.train.json"}]}], "tags": ["opinion", "target", "absa", "aspect", "sentiment analysis"]}
2023-11-22T13:32:07+00:00
[ "2210.12623", "2212.10548" ]
[ "en", "fr", "es", "ru", "tr" ]
TAGS #task_categories-token-classification #size_categories-1K<n<10K #language-English #language-French #language-Spanish #language-Russian #language-Turkish #license-apache-2.0 #opinion #target #absa #aspect #sentiment analysis #arxiv-2210.12623 #arxiv-2212.10548 #region-us
This repository contains the English 'SemEval-2014 Task 4: Aspect Based Sentiment Analysis' dataset, translated with DeepL into Spanish, French, Russian, and Turkish. The labels have been manually projected. For more details, read this paper: Model and Data Transfer for Cross-Lingual Sequence Labelling in Zero-Resource Settings.

Intended Usage: Since the datasets are parallel across languages, they are ideal for evaluating annotation projection algorithms, such as T-Projection.

# Label Dictionary

# Citation

If you use this data, please cite the following papers:
[ "# Label Dictionary", "# Cication\n\nIf you use this data, please cite the following papers:" ]
[ "TAGS\n#task_categories-token-classification #size_categories-1K<n<10K #language-English #language-French #language-Spanish #language-Russian #language-Turkish #license-apache-2.0 #opinion #target #absa #aspect #sentiment analysis #arxiv-2210.12623 #arxiv-2212.10548 #region-us \n", "# Label Dictionary", "# Cication\n\nIf you use this data, please cite the following papers:" ]
[ 96, 3, 16 ]
[ "passage: TAGS\n#task_categories-token-classification #size_categories-1K<n<10K #language-English #language-French #language-Spanish #language-Russian #language-Turkish #license-apache-2.0 #opinion #target #absa #aspect #sentiment analysis #arxiv-2210.12623 #arxiv-2212.10548 #region-us \n# Label Dictionary# Cication\n\nIf you use this data, please cite the following papers:" ]
705564aeb05024a265d4469f487f26e88fec7a1a
# Dataset Card for "guanaco-llama2-1k-testArt" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
deepbrain/guanaco-llama2-1k-testArt
[ "region:us" ]
2023-11-02T18:15:49+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1654448, "num_examples": 1000}], "download_size": 0, "dataset_size": 1654448}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-03T02:22:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "guanaco-llama2-1k-testArt" More Information needed
[ "# Dataset Card for \"guanaco-llama2-1k-testArt\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"guanaco-llama2-1k-testArt\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"guanaco-llama2-1k-testArt\"\n\nMore Information needed" ]
ed0ace5b0e55d7e5ce801274c0c8a0541fba7c03
# Dataset Card for "vlsp-dataset-2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
pphuc25/vlsp-dataset-2
[ "region:us" ]
2023-11-02T18:16:34+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 6165849355.594, "num_examples": 50482}], "download_size": 6304115752, "dataset_size": 6165849355.594}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-02T18:23:01+00:00
[]
[]
TAGS #region-us
# Dataset Card for "vlsp-dataset-2" More Information needed
[ "# Dataset Card for \"vlsp-dataset-2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"vlsp-dataset-2\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"vlsp-dataset-2\"\n\nMore Information needed" ]
f71d809b506e299ef9750a8ca5a6b7bfb3cb9a39
# Dataset Card for "mbpp_short" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
determined-ai/mbpp_short
[ "region:us" ]
2023-11-02T18:21:49+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "code", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 43506, "num_examples": 227}, {"name": "test", "num_bytes": 54302, "num_examples": 291}, {"name": "validation", "num_bytes": 9398, "num_examples": 51}], "download_size": 56077, "dataset_size": 107206}}
2023-11-02T18:21:53+00:00
[]
[]
TAGS #region-us
# Dataset Card for "mbpp_short" More Information needed
[ "# Dataset Card for \"mbpp_short\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"mbpp_short\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"mbpp_short\"\n\nMore Information needed" ]
8130c1065aa9ff1254ad5682d3cb2e349c78b876
Here you can find the datasets generated to build an out-of-context classifier (ElektraGo Chat).

There are different versions of the dataset:

- Highly imbalanced
- Imbalanced
- Balanced (data augmentation and undersampling)
Fraternitas/out-of-scope
[ "task_categories:text-classification", "size_categories:10K<n<100K", "language:en", "region:us" ]
2023-11-02T18:26:51+00:00
{"language": ["en"], "size_categories": ["10K<n<100K"], "task_categories": ["text-classification"]}
2023-11-03T00:47:43+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #size_categories-10K<n<100K #language-English #region-us
Here you can find the datasets generated to build an out-of-context classifier (ElektraGo Chat).


There are different versions of the dataset:


- Highly imbalanced
- Imbalanced
- Balanced (data augmentation and undersampling)
[]
[ "TAGS\n#task_categories-text-classification #size_categories-10K<n<100K #language-English #region-us \n" ]
[ 33 ]
[ "passage: TAGS\n#task_categories-text-classification #size_categories-10K<n<100K #language-English #region-us \n" ]
59bb84d246dd8c4f82b459477cae4ee985469c2c
# Dataset Card for "identity_finetune_data_3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sayan1101/identity_finetune_data_3
[ "region:us" ]
2023-11-02T18:35:32+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 262944, "num_examples": 815}, {"name": "test", "num_bytes": 28835, "num_examples": 91}], "download_size": 142606, "dataset_size": 291779}}
2023-11-03T04:16:41+00:00
[]
[]
TAGS #region-us
# Dataset Card for "identity_finetune_data_3" More Information needed
[ "# Dataset Card for \"identity_finetune_data_3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"identity_finetune_data_3\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"identity_finetune_data_3\"\n\nMore Information needed" ]
0eb711971bed710bea2657da5872d0587c75e084
# Dataset Card for "DM-vton" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
SaffalPoosh/DM-vton-test
[ "region:us" ]
2023-11-02T18:52:31+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "person_images", "dtype": "image"}, {"name": "cloth_images", "dtype": "image"}, {"name": "cloth_edge_images", "dtype": "image"}], "splits": [{"name": "test", "num_bytes": 6878828.0, "num_examples": 416}], "download_size": 6469560, "dataset_size": 6878828.0}}
2023-11-02T18:56:47+00:00
[]
[]
TAGS #region-us
# Dataset Card for "DM-vton" More Information needed
[ "# Dataset Card for \"DM-vton\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"DM-vton\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"DM-vton\"\n\nMore Information needed" ]
dc8ae8e8b3fb93cae4d1e34745a969bdefe20d38
# AutoTrain Dataset for project: docid

## Dataset Description

This dataset has been automatically processed by AutoTrain for project docid.

### Languages

The BCP-47 code for the dataset's language is unk.

## Dataset Structure

### Data Instances

A sample from this dataset looks as follows:

```json
[
  {
    "text": "MetLife-Walker Information_HI_2023.3.29_14.17_C_B (1).docx.pdf",
    "feat_url": "datasaur://static/5732/2a298b78-1c2c-4ff8-ad49-357670dd5ea7.pdf",
    "target": 0,
    "feat_CarrierName": "Met Life",
    "feat_ProductTypes": "Hospital Indemnity"
  },
  {
    "text": "Cima Telecom Inc_Prop (002)_ (2).docx.pdf",
    "feat_url": "datasaur://static/5732/8adee066-55c4-4f8d-8dcd-53d5fdb42732.pdf",
    "target": 0,
    "feat_CarrierName": "Met Life",
    "feat_ProductTypes": "Basic Life;Basic AD&D;Voluntary Life;Voluntary AD&D;Voluntary Dependent AD&D;Short-term Disability;Long-term Disability;Dental;Vision"
  }
]
```

### Dataset Fields

The dataset has the following fields (also called "features"):

```json
{
  "text": "Value(dtype='string', id=None)",
  "feat_url": "Value(dtype='string', id=None)",
  "target": "ClassLabel(names=['Proposal', 'Summary (including SBC)'], id=None)",
  "feat_CarrierName": "Value(dtype='string', id=None)",
  "feat_ProductTypes": "Value(dtype='string', id=None)"
}
```

### Dataset Splits

This dataset is split into a train and validation split. The split sizes are as follows:

| Split name | Num samples         |
| ------------ | ------------------- |
| train | 15 |
| valid | 5 |
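To illustrate the `target` field, here is a hypothetical snippet that maps the ClassLabel integers back to their names; it assumes the AutoTrain splits are accessible through the `datasets` library under this repository id.

```python
# Hedged sketch: decode the "target" ClassLabel back to its string name.
# Assumption: the dataset can be loaded under this repo id (it may be private).
from datasets import load_dataset

ds = load_dataset("bcombs/autotrain-data-docid", split="train")
label_names = ds.features["target"].names  # ['Proposal', 'Summary (including SBC)']
row = ds[0]
print(row["text"], "->", label_names[row["target"]])
```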
bcombs/autotrain-data-docid
[ "task_categories:text-classification", "region:us" ]
2023-11-02T18:55:53+00:00
{"task_categories": ["text-classification"]}
2023-11-02T19:38:00+00:00
[]
[]
TAGS #task_categories-text-classification #region-us
AutoTrain Dataset for project: docid
====================================


Dataset Description
-------------------


This dataset has been automatically processed by AutoTrain for project docid.

### Languages


The BCP-47 code for the dataset's language is unk.


Dataset Structure
-----------------

### Data Instances


A sample from this dataset looks as follows:

### Dataset Fields


The dataset has the following fields (also called "features"):

### Dataset Splits


This dataset is split into a train and validation split. The split sizes are as follows:
[ "### Languages\n\n\nThe BCP-47 code for the dataset's language is unk.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
[ "TAGS\n#task_categories-text-classification #region-us \n", "### Languages\n\n\nThe BCP-47 code for the dataset's language is unk.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
[ 17, 27, 17, 23, 27 ]
[ "passage: TAGS\n#task_categories-text-classification #region-us \n### Languages\n\n\nThe BCP-47 code for the dataset's language is unk.\n\n\nDataset Structure\n-----------------### Data Instances\n\n\nA sample from this dataset looks as follows:### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
d56e34920341c0364559b33d6c6ec8bf9af1cb0b
# Dataset Card for "short_data_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
determined-ai/short_data_test
[ "region:us" ]
2023-11-02T18:57:14+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "split_1", "path": "data/split_1-*"}, {"split": "split_2", "path": "data/split_2-*"}, {"split": "split_3", "path": "data/split_3-*"}, {"split": "split_4", "path": "data/split_4-*"}, {"split": "split_5", "path": "data/split_5-*"}, {"split": "split_6", "path": "data/split_6-*"}, {"split": "split_7", "path": "data/split_7-*"}, {"split": "split_8", "path": "data/split_8-*"}, {"split": "split_9", "path": "data/split_9-*"}, {"split": "split_10", "path": "data/split_10-*"}, {"split": "split_11", "path": "data/split_11-*"}, {"split": "split_12", "path": "data/split_12-*"}, {"split": "split_13", "path": "data/split_13-*"}, {"split": "split_14", "path": "data/split_14-*"}, {"split": "train", "path": "data/train-*"}, {"split": "val", "path": "data/val-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "split_1", "num_bytes": 22, "num_examples": 1}, {"name": "split_2", "num_bytes": 44, "num_examples": 2}, {"name": "split_3", "num_bytes": 66, "num_examples": 3}, {"name": "split_4", "num_bytes": 88, "num_examples": 4}, {"name": "split_5", "num_bytes": 110, "num_examples": 5}, {"name": "split_6", "num_bytes": 132, "num_examples": 6}, {"name": "split_7", "num_bytes": 154, "num_examples": 7}, {"name": "split_8", "num_bytes": 176, "num_examples": 8}, {"name": "split_9", "num_bytes": 198, "num_examples": 9}, {"name": "split_10", "num_bytes": 230, "num_examples": 10}, {"name": "split_11", "num_bytes": 254, "num_examples": 11}, {"name": "split_12", "num_bytes": 278, "num_examples": 12}, {"name": "split_13", "num_bytes": 302, "num_examples": 13}, {"name": "split_14", "num_bytes": 326, "num_examples": 14}, {"name": "train", "num_bytes": 20, "num_examples": 1}, {"name": "val", "num_bytes": 18, "num_examples": 1}], "download_size": 14197, "dataset_size": 2418}}
2023-11-02T18:57:34+00:00
[]
[]
TAGS #region-us
# Dataset Card for "short_data_test" More Information needed
[ "# Dataset Card for \"short_data_test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"short_data_test\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"short_data_test\"\n\nMore Information needed" ]
fadf0f34669d6f90efd1473605c06cfafa3dbec9
# Dataset Card for "training_data_hands" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ksukrit/training_data_hands
[ "region:us" ]
2023-11-02T19:07:14+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "bad", "1": "good"}}}}], "splits": [{"name": "train", "num_bytes": 4654722697.352, "num_examples": 3974}], "download_size": 0, "dataset_size": 4654722697.352}}
2023-11-02T19:16:49+00:00
[]
[]
TAGS #region-us
# Dataset Card for "training_data_hands" More Information needed
[ "# Dataset Card for \"training_data_hands\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"training_data_hands\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"training_data_hands\"\n\nMore Information needed" ]
c2c0e6bd30b5c944c4b9248b7da3253297cdf122
# Dataset Card for "DM-vton-train" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
SaffalPoosh/DM-vton-train
[ "region:us" ]
2023-11-02T19:07:36+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "person_images", "dtype": "image"}, {"name": "person_densepose", "dtype": "image"}, {"name": "person_label", "dtype": "image"}, {"name": "person_pose", "dtype": "image"}, {"name": "cloth_images", "dtype": "image"}, {"name": "cloth_edge_images", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 467128253.52, "num_examples": 6824}], "download_size": 153179210, "dataset_size": 467128253.52}}
2023-11-02T19:14:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for "DM-vton-train" More Information needed
[ "# Dataset Card for \"DM-vton-train\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"DM-vton-train\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"DM-vton-train\"\n\nMore Information needed" ]
b3bd0d9c611f29b0d1af18c6710c2c8007cc9ced
# Dataset Card for Dataset Name

<!-- Provide a quick summary of the dataset. -->

This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1).

## Dataset Details

### Dataset Description

<!-- Provide a longer summary of what this dataset is. -->

- **Curated by:** Mani Moayedi
- **Language(s) (NLP):** Farsi (Persian)
- **License:** MIT

## Uses

The samples of this dataset are user comments about products of an online shop website. Each comment contains some additional data alongside the comment's body, like a star-rating value (0-5). This dataset can be used to train or generate different data models for NLP tasks like opinion mining and sentiment analysis.

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->

This dataset is the result of crawling 7 categories of cosmetic products from a Persian online shop's product pages. The vocabulary mostly revolves around cosmetics subjects, therefore it might not be suitable for use cases which need a generic collection of words and phrases.

## Dataset Structure

Each comment is represented in a structured format and contains the comment's body, the comment's title, a star-rating value (0-5), other users' reactions to each comment in terms of number of likes and number of dislikes, and a list of advantages and disadvantages that the user might have specified. The title field and the advantages/disadvantages fields can be null or empty in many comments.

For more details please check out the file [Dataset Description](https://huggingface.co/datasets/Acidmanic/DK-FA-Cosmetics/resolve/main/Dataset%20Description.pdf).

## Dataset Creation

The dataset is created using a crawler against an online shop's website. Comments are scraped from product pages and stored as json, jsonl and csv files.

#### Personal and Sensitive Information

The dataset contains the usernames of the users who posted the comments. All the information in the dataset, including these usernames, is present on the product web pages without any login or authentication.

## Bias, Risks, and Limitations

From an NLP perspective, the dataset mostly contains information about cosmetic products and the quality of sellers' and resellers' service. Therefore, treating this dataset as a general source of language might introduce some issues, depending on the use case.

## Glossary

| Set | Number Of Comments | Number Of Products | Average Comments Per Product |
| ----------- | ----------- | ----------- |----------- |
| **dk-fa-cosmetics** (Full dataset) | 421078 | 8325 | 51 |
| dkfacs-eyeliner (sub-set) | 30824 | 284 | 109 |
| dkfacs-stand (sub-set) | 83197 | 1738 | 48 |
| dkfacs-mascara (sub-set) | 47961 | 338 | 142 |
| dkfacs-sun-screen (sub-set) | 118699 | 772 | 154 |
| dkfacs-eye-shadow (sub-set) | 14532 | 634 | 23 |
| dkfacs-nails (sub-set) | 75209 | 3260 | 23 |
| dkfacs-lipsticks (sub-set) | 50656 | 1299 | 39 |

## Dataset Card Authors

Mani Moayedi

## Dataset Card Contact

[email protected]

https://github.com/Acidmanic
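As an illustration only, a record in the jsonl form could be consumed as below; the file name and field keys (`body`, `rating`, and so on) are assumptions on our part, since the authoritative schema lives in the linked Dataset Description PDF.

```python
# Hedged sketch: stream the JSONL form and tally the 0-5 star ratings.
# Assumptions: the file name and field names are illustrative, not confirmed
# by the card; consult the Dataset Description PDF for the real schema.
import json
from collections import Counter

ratings = Counter()
with open("dk-fa-cosmetics.jsonl", encoding="utf-8") as f:
    for line in f:
        comment = json.loads(line)
        ratings[comment.get("rating")] += 1  # star rating on a 0-5 scale

for stars, count in sorted(ratings.items(), key=lambda kv: str(kv[0])):
    print(stars, "stars:", count)
```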
Acidmanic/DK-FA-Cosmetics
[ "size_categories:100K<n<1M", "language:fa", "license:mit", "e-commerce", "Online-Shop", "Comments", "region:us" ]
2023-11-02T19:09:20+00:00
{"language": ["fa"], "license": "mit", "size_categories": ["100K<n<1M"], "pretty_name": "Dk-Fa-Cosmetics", "tags": ["e-commerce", "Online-Shop", "Comments"]}
2023-11-03T19:21:25+00:00
[]
[ "fa" ]
TAGS #size_categories-100K<n<1M #language-Persian #license-mit #e-commerce #Online-Shop #Comments #region-us
Dataset Card for Dataset Name
=============================


This dataset card aims to be a base template for new datasets. It has been generated using this raw template.


Dataset Details
---------------

### Dataset Description


* Curated by: Mani Moayedi
* Language(s) (NLP): Farsi (Persian)
* License: MIT


Uses
----


The samples of this dataset are user comments about products of an online shop website. Each comment contains some additional data alongside the comment's body, like a star-rating value (0-5). This dataset can be used to train or generate different data models for NLP tasks like opinion mining and sentiment analysis.

### Out-of-Scope Use


This dataset is the result of crawling 7 categories of cosmetic products from a Persian online shop's product pages. The vocabulary mostly revolves around cosmetics subjects, therefore it might not be suitable for use cases which need a generic collection of words and phrases.


Dataset Structure
-----------------


Each comment is represented in a structured format and contains the comment's body, the comment's title, a star-rating value (0-5), other users' reactions to each comment in terms of number of likes and number of dislikes, and a list of advantages and disadvantages that the user might have specified. The title field and the advantages/disadvantages fields can be null or empty in many comments.


For more details please check out the file Dataset Description.


Dataset Creation
----------------


The dataset is created using a crawler against an online shop's website. Comments are scraped from product pages and stored as json, jsonl and csv files.

#### Personal and Sensitive Information


The dataset contains the usernames of the users who posted the comments. All the information in the dataset, including these usernames, is present on the product web pages without any login or authentication.


Bias, Risks, and Limitations
----------------------------


From an NLP perspective, the dataset mostly contains information about cosmetic products and the quality of sellers' and resellers' service. Therefore, treating this dataset as a general source of language might introduce some issues, depending on the use case.


Glossary
--------



Dataset Card Authors
--------------------


Mani Moayedi


Dataset Card Contact
--------------------


acidmanic.moayedi@URL


URL
[ "### Dataset Description\n\n\n* Curated by: Mani Moayedi\n* Language(s) (NLP): Farsi (Persian)\n* License: MIT\n\n\nUses\n----\n\n\nThe samples of this dataset are user comments about products of an online shop website. Each comment contains some\nadditional data alongside the comments body, like star-rating value (0-5). This dataset can be used to train or generate\ndifferent data-models for NLP tasks like opinion mining and sentiment analysis.", "### Out-of-Scope Use\n\n\nThis dataset is the result of crawling 7 categories of cosmetic products from a perisan online-shop's product pages. The vocabulary\nmostly revolves around the cosmetics subjects, therefore it might not be suitable for use cases which needs a generic collection of\nwords and phrases.\n\n\nDataset Structure\n-----------------\n\n\nEach comment is represented\nin structured format and contains comment's body, comment's title, star-rating value (0-5), Other users reactions to\neach comment in terms of number-of-likes and number-of-dislikes. and a list of advantages and dis-advantages that user\nmight have specified. title field and advantages/disadvantages fields can be null or empty in many comments.\n\n\nFor more details please check out the file Dataset Description.\n\n\nDataset Creation\n----------------\n\n\nThe dataset is created using a crawler agains an online shop's website. Comments are scraped from product pages and stored as json, jsonl and csv files.", "#### Personal and Sensitive Information\n\n\nThe dataset contains the username of userse who has posted the comments. All the information in the dataset, including these usernames, are\npresent on the products web-page whithout any login or authentication.\n\n\nBias, Risks, and Limitations\n----------------------------\n\n\nFrom Npl prespective, the dataset might mostly contain information about the the consmetic products and the quality\nof sellers and resellers service. Therefore considering this dataset as a general source of language might introduce\nsome issues, depending on the use-case.\n\n\nGlossary\n--------\n\n\n\nDataset Card Authors\n--------------------\n\n\nMani Moayedi\n\n\nDataset Card Contact\n--------------------\n\n\nacidmanic.moayedi@URL\n\n\nURL" ]
[ "TAGS\n#size_categories-100K<n<1M #language-Persian #license-mit #e-commerce #Online-Shop #Comments #region-us \n", "### Dataset Description\n\n\n* Curated by: Mani Moayedi\n* Language(s) (NLP): Farsi (Persian)\n* License: MIT\n\n\nUses\n----\n\n\nThe samples of this dataset are user comments about products of an online shop website. Each comment contains some\nadditional data alongside the comments body, like star-rating value (0-5). This dataset can be used to train or generate\ndifferent data-models for NLP tasks like opinion mining and sentiment analysis.", "### Out-of-Scope Use\n\n\nThis dataset is the result of crawling 7 categories of cosmetic products from a perisan online-shop's product pages. The vocabulary\nmostly revolves around the cosmetics subjects, therefore it might not be suitable for use cases which needs a generic collection of\nwords and phrases.\n\n\nDataset Structure\n-----------------\n\n\nEach comment is represented\nin structured format and contains comment's body, comment's title, star-rating value (0-5), Other users reactions to\neach comment in terms of number-of-likes and number-of-dislikes. and a list of advantages and dis-advantages that user\nmight have specified. title field and advantages/disadvantages fields can be null or empty in many comments.\n\n\nFor more details please check out the file Dataset Description.\n\n\nDataset Creation\n----------------\n\n\nThe dataset is created using a crawler agains an online shop's website. Comments are scraped from product pages and stored as json, jsonl and csv files.", "#### Personal and Sensitive Information\n\n\nThe dataset contains the username of userse who has posted the comments. All the information in the dataset, including these usernames, are\npresent on the products web-page whithout any login or authentication.\n\n\nBias, Risks, and Limitations\n----------------------------\n\n\nFrom Npl prespective, the dataset might mostly contain information about the the consmetic products and the quality\nof sellers and resellers service. Therefore considering this dataset as a general source of language might introduce\nsome issues, depending on the use-case.\n\n\nGlossary\n--------\n\n\n\nDataset Card Authors\n--------------------\n\n\nMani Moayedi\n\n\nDataset Card Contact\n--------------------\n\n\nacidmanic.moayedi@URL\n\n\nURL" ]
[ 38, 103, 228, 157 ]
[ "passage: TAGS\n#size_categories-100K<n<1M #language-Persian #license-mit #e-commerce #Online-Shop #Comments #region-us \n### Dataset Description\n\n\n* Curated by: Mani Moayedi\n* Language(s) (NLP): Farsi (Persian)\n* License: MIT\n\n\nUses\n----\n\n\nThe samples of this dataset are user comments about products of an online shop website. Each comment contains some\nadditional data alongside the comments body, like star-rating value (0-5). This dataset can be used to train or generate\ndifferent data-models for NLP tasks like opinion mining and sentiment analysis.### Out-of-Scope Use\n\n\nThis dataset is the result of crawling 7 categories of cosmetic products from a perisan online-shop's product pages. The vocabulary\nmostly revolves around the cosmetics subjects, therefore it might not be suitable for use cases which needs a generic collection of\nwords and phrases.\n\n\nDataset Structure\n-----------------\n\n\nEach comment is represented\nin structured format and contains comment's body, comment's title, star-rating value (0-5), Other users reactions to\neach comment in terms of number-of-likes and number-of-dislikes. and a list of advantages and dis-advantages that user\nmight have specified. title field and advantages/disadvantages fields can be null or empty in many comments.\n\n\nFor more details please check out the file Dataset Description.\n\n\nDataset Creation\n----------------\n\n\nThe dataset is created using a crawler agains an online shop's website. Comments are scraped from product pages and stored as json, jsonl and csv files." ]
7306cdc04caf69910ba0f50369c0f2c2e531c910
# Dataset Card for "summarize_from_feedback_tldr_3_filtered_oai_preprocessing_pythia" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
vwxyzjn/summarize_from_feedback_tldr_3_filtered_oai_preprocessing_pythia
[ "region:us" ]
2023-11-02T19:18:32+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "subreddit", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "post", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "query_token", "sequence": "int64"}, {"name": "query", "dtype": "string"}, {"name": "reference_response", "dtype": "string"}, {"name": "reference_response_token", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 886229543, "num_examples": 116722}, {"name": "validation", "num_bytes": 48966797, "num_examples": 6447}, {"name": "test", "num_bytes": 49800881, "num_examples": 6553}], "download_size": 338995010, "dataset_size": 984997221}}
2023-11-02T19:19:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "summarize_from_feedback_tldr_3_filtered_oai_preprocessing_pythia" More Information needed
[ "# Dataset Card for \"summarize_from_feedback_tldr_3_filtered_oai_preprocessing_pythia\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"summarize_from_feedback_tldr_3_filtered_oai_preprocessing_pythia\"\n\nMore Information needed" ]
[ 6, 37 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"summarize_from_feedback_tldr_3_filtered_oai_preprocessing_pythia\"\n\nMore Information needed" ]
424a8d37f801e4167432204a1b1e9c9ab76c9ad0
# Dataset Card for "summarize_from_feedback_oai_preprocessing_pythia" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
vwxyzjn/summarize_from_feedback_oai_preprocessing_pythia
[ "region:us" ]
2023-11-02T19:31:02+00:00
{"dataset_info": {"features": [{"name": "info", "struct": [{"name": "id", "dtype": "string"}, {"name": "post", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "subreddit", "dtype": "string"}, {"name": "site", "dtype": "string"}, {"name": "article", "dtype": "string"}]}, {"name": "summaries", "list": [{"name": "text", "dtype": "string"}, {"name": "policy", "dtype": "string"}, {"name": "note", "dtype": "string"}]}, {"name": "choice", "dtype": "int32"}, {"name": "worker", "dtype": "string"}, {"name": "batch", "dtype": "string"}, {"name": "split", "dtype": "string"}, {"name": "extra", "struct": [{"name": "confidence", "dtype": "int32"}]}, {"name": "query_token", "sequence": "int64"}, {"name": "query", "dtype": "string"}, {"name": "response0", "dtype": "string"}, {"name": "response0_token", "sequence": "int64"}, {"name": "response1", "dtype": "string"}, {"name": "response1_token", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 782558889, "num_examples": 92858}, {"name": "validation", "num_bytes": 734266349, "num_examples": 86086}], "download_size": 124011801, "dataset_size": 1516825238}}
2023-11-02T19:31:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for "summarize_from_feedback_oai_preprocessing_pythia" More Information needed
[ "# Dataset Card for \"summarize_from_feedback_oai_preprocessing_pythia\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"summarize_from_feedback_oai_preprocessing_pythia\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"summarize_from_feedback_oai_preprocessing_pythia\"\n\nMore Information needed" ]
24e33438c3b9f0f48602443f35b330b8ea999847
---
tags:
- gpt-4
- janet
---

# Description
39 Multi-Turn conversations generated by GPT-4.
Each conversation represents a distinct dialogue between two people -
a sniper and a central operator called Janet.

The context of each conversation is the same - the sniper is perched on a rooftop,
on a mission to eliminate a dangerous person that's hiding in the crowd below.
Janet knows what that person looks like and is talking the sniper through the process
of identifying and eliminating the target.

# Schema
```
{
    "id": string
    "conversation": [{
        "from": string, # system - system-prompt, assistant - LLM, user - human
        "value": string, # dialogue line
    }]
}
```

# Implementation details
Original format in `.txt`, Axolotl-compatible ShareGPT version is `.sharegpt.jsonl`.

# Citation

```
@misc{Janet24Oct,
  title = {Janet24Oct: A Dataset of Multi-Turn Conversations for Conversational AI},
  author = {Kieran Donaldson and Mikel Bober-Izar and Piotr Trochim},
  year = {2023},
  publisher = {HuggingFace},
  journal = {HuggingFace repository},
  howpublished = {\url{https://huggingface.co/datasets/IconicAI/janet-24oct}},
}
```
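Given the schema above, the ShareGPT jsonl file can be walked turn by turn as in this minimal sketch; the exact file name is an assumption, but the field names (`id`, `conversation`, `from`, `value`) come straight from the schema.

```python
# Hedged sketch: print the first conversation from the ShareGPT JSONL file.
# Assumption: the file is named "janet-24oct.sharegpt.jsonl" locally.
import json

with open("janet-24oct.sharegpt.jsonl", encoding="utf-8") as f:
    record = json.loads(f.readline())

print(f"--- conversation {record['id']} ---")
for turn in record["conversation"]:
    # "from" is one of: system (system-prompt), assistant (Janet), user (the sniper)
    print(f"{turn['from']}: {turn['value']}")
```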
IconicAI/janet-24oct
[ "task_categories:conversational", "region:us" ]
2023-11-02T19:33:22+00:00
{"task_categories": ["conversational"]}
2023-11-17T15:21:15+00:00
[]
[]
TAGS #task_categories-conversational #region-us
---
tags:
- gpt-4
- janet
---
# Description
39 Multi-Turn conversations generated by GPT-4.
Each conversation represents a distinct dialogue between two people - 
a sniper and a central operator called Janet.

The context of each conversation is the same - the sniper is perched on a rooftop,
on a mission to eliminate a dangerous person that's hiding in the crowd below.
Janet knows what that person looks like and is talking the sniper through the process
of identifying and eliminating the target.
# Schema
# Implementation details
Original format in '.txt', Axolotl-compatible ShareGPT version is '.URL'.
[ "# Description\n39 Multi-Turn conversations generated by GPT-4.\nEach conversation represents a distinct dialogue between two people - \na sniper and a central operator called Janet.\n\nThe context of each conversation is the same - the sniper is perched on a rooftop,\non a mission to eliminate a dangerous person that's hiding in the crowd below.\nJanet knows how that person looks like and is talking the sniper through the process\nof identifying and eliminating the target.", "# Schema", "# Implementation details\nOriginal format in '.txt', Axolotl-compatible ShareGPT version is '.URL'." ]
[ "TAGS\n#task_categories-conversational #region-us \n", "# Description\n39 Multi-Turn conversations generated by GPT-4.\nEach conversation represents a distinct dialogue between two people - \na sniper and a central operator called Janet.\n\nThe context of each conversation is the same - the sniper is perched on a rooftop,\non a mission to eliminate a dangerous person that's hiding in the crowd below.\nJanet knows how that person looks like and is talking the sniper through the process\nof identifying and eliminating the target.", "# Schema", "# Implementation details\nOriginal format in '.txt', Axolotl-compatible ShareGPT version is '.URL'." ]
[ 16, 106, 3, 30 ]
[ "passage: TAGS\n#task_categories-conversational #region-us \n# Description\n39 Multi-Turn conversations generated by GPT-4.\nEach conversation represents a distinct dialogue between two people - \na sniper and a central operator called Janet.\n\nThe context of each conversation is the same - the sniper is perched on a rooftop,\non a mission to eliminate a dangerous person that's hiding in the crowd below.\nJanet knows how that person looks like and is talking the sniper through the process\nof identifying and eliminating the target.# Schema# Implementation details\nOriginal format in '.txt', Axolotl-compatible ShareGPT version is '.URL'." ]
f74678ae56fe95a5298e14afa6690273ea2b923f
# Dataset Card for "audio_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dhanush23/audio_dataset
[ "region:us" ]
2023-11-02T19:40:56+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "file_path", "dtype": "string"}, {"name": "emotion", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 312421.43086865044, "num_examples": 4657}, {"name": "test", "num_bytes": 133971.56913134956, "num_examples": 1997}], "download_size": 66671, "dataset_size": 446393.0}}
2023-11-02T19:48:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "audio_dataset" More Information needed
[ "# Dataset Card for \"audio_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"audio_dataset\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"audio_dataset\"\n\nMore Information needed" ]
f8f8b7f0b94642476020587eb2b4f20ceacf01fe
# Dataset Card for "tla_masked_code_eval" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
aneeshas/tla_masked_code_eval
[ "region:us" ]
2023-11-02T20:36:39+00:00
{"dataset_info": {"features": [{"name": "protocol", "dtype": "string"}, {"name": "prompt", "dtype": "string"}, {"name": "label", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 139933, "num_examples": 18}], "download_size": 52239, "dataset_size": 139933}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-03T01:56:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "tla_masked_code_eval" More Information needed
[ "# Dataset Card for \"tla_masked_code_eval\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"tla_masked_code_eval\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"tla_masked_code_eval\"\n\nMore Information needed" ]
142908dc0ec4dd373adfea9b1c52a593deeab7a6
# Dataset Card for "ios_action" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
anamhira/ios_action
[ "region:us" ]
2023-11-02T20:50:12+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "valid", "path": "data/valid-*"}]}], "dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 482012, "num_examples": 233}, {"name": "valid", "num_bytes": 5762, "num_examples": 3}], "download_size": 79950, "dataset_size": 487774}}
2023-11-14T19:14:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ios_action" More Information needed
[ "# Dataset Card for \"ios_action\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ios_action\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ios_action\"\n\nMore Information needed" ]
14efcdde20ec0d5b3c61748010914b1dedbaa656
# Dataset Card for "demodata1102" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Bsbell21/demodata1102
[ "region:us" ]
2023-11-02T20:51:18+00:00
{"dataset_info": {"features": [{"name": "item", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "ad", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1671, "num_examples": 5}], "download_size": 4854, "dataset_size": 1671}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-02T20:51:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "demodata1102" More Information needed
[ "# Dataset Card for \"demodata1102\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"demodata1102\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"demodata1102\"\n\nMore Information needed" ]
22fb6882b523a19197e7fa23881d9945de2fa246
# Dataset Card for Evaluation run of TheBloke/LongChat-13B-GPTQ

## Dataset Description

- **Homepage:** 
- **Repository:** https://huggingface.co/TheBloke/LongChat-13B-GPTQ
- **Paper:** 
- **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard
- **Point of Contact:** [email protected]

### Dataset Summary

Dataset automatically created during the evaluation run of model [TheBloke/LongChat-13B-GPTQ](https://huggingface.co/TheBloke/LongChat-13B-GPTQ) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).

The dataset is composed of 3 configurations, each one corresponding to one of the evaluated tasks.

The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results.

An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).

To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset("open-llm-leaderboard/details_TheBloke__LongChat-13B-GPTQ_public",
	"harness_winogrande_5",
	split="train")
```

## Latest results

These are the [latest results from run 2023-11-04T20:06:00.633401](https://huggingface.co/datasets/open-llm-leaderboard/details_TheBloke__LongChat-13B-GPTQ_public/blob/main/results_2023-11-04T20-06-00.633401.json) (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval):

```python
{
    "all": {
        "em": 0.0,
        "em_stderr": 0.0,
        "f1": 5.243288590604027e-05,
        "f1_stderr": 5.243288590604094e-05,
        "acc": 0.2557221783741121,
        "acc_stderr": 0.007024402099929664
    },
    "harness|drop|3": {
        "em": 0.0,
        "em_stderr": 0.0,
        "f1": 5.243288590604027e-05,
        "f1_stderr": 5.243288590604094e-05
    },
    "harness|gsm8k|5": {
        "acc": 0.0,
        "acc_stderr": 0.0
    },
    "harness|winogrande|5": {
        "acc": 0.5114443567482242,
        "acc_stderr": 0.014048804199859329
    }
}
```

### Supported Tasks and Leaderboards

[More Information Needed]

### Languages

[More Information Needed]

## Dataset Structure

### Data Instances

[More Information Needed]

### Data Fields

[More Information Needed]

### Data Splits

[More Information Needed]

## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

[More Information Needed]

### Citation Information

[More Information Needed]

### Contributions

[More Information Needed]
open-llm-leaderboard/details_TheBloke__LongChat-13B-GPTQ
[ "region:us" ]
2023-11-02T21:34:51+00:00
{"pretty_name": "Evaluation run of TheBloke/LongChat-13B-GPTQ", "dataset_summary": "Dataset automatically created during the evaluation run of model [TheBloke/LongChat-13B-GPTQ](https://huggingface.co/TheBloke/LongChat-13B-GPTQ) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_TheBloke__LongChat-13B-GPTQ_public\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-11-04T20:06:00.633401](https://huggingface.co/datasets/open-llm-leaderboard/details_TheBloke__LongChat-13B-GPTQ_public/blob/main/results_2023-11-04T20-06-00.633401.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.0,\n \"em_stderr\": 0.0,\n \"f1\": 5.243288590604027e-05,\n \"f1_stderr\": 5.243288590604094e-05,\n \"acc\": 0.2557221783741121,\n \"acc_stderr\": 0.007024402099929664\n },\n \"harness|drop|3\": {\n \"em\": 0.0,\n \"em_stderr\": 0.0,\n \"f1\": 5.243288590604027e-05,\n \"f1_stderr\": 5.243288590604094e-05\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.0,\n \"acc_stderr\": 0.0\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.5114443567482242,\n \"acc_stderr\": 0.014048804199859329\n }\n}\n```", "repo_url": "https://huggingface.co/TheBloke/LongChat-13B-GPTQ", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_drop_3", "data_files": [{"split": "2023_11_02T21_34_31.634462", "path": ["**/details_harness|drop|3_2023-11-02T21-34-31.634462.parquet"]}, {"split": "2023_11_04T20_06_00.633401", "path": ["**/details_harness|drop|3_2023-11-04T20-06-00.633401.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-11-04T20-06-00.633401.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_11_02T21_34_31.634462", "path": ["**/details_harness|gsm8k|5_2023-11-02T21-34-31.634462.parquet"]}, {"split": "2023_11_04T20_06_00.633401", "path": ["**/details_harness|gsm8k|5_2023-11-04T20-06-00.633401.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-11-04T20-06-00.633401.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_11_02T21_34_31.634462", "path": ["**/details_harness|winogrande|5_2023-11-02T21-34-31.634462.parquet"]}, {"split": "2023_11_04T20_06_00.633401", "path": ["**/details_harness|winogrande|5_2023-11-04T20-06-00.633401.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-11-04T20-06-00.633401.parquet"]}]}, {"config_name": "results", "data_files": [{"split": 
"2023_11_02T21_34_31.634462", "path": ["results_2023-11-02T21-34-31.634462.parquet"]}, {"split": "2023_11_04T20_06_00.633401", "path": ["results_2023-11-04T20-06-00.633401.parquet"]}, {"split": "latest", "path": ["results_2023-11-04T20-06-00.633401.parquet"]}]}]}
2023-11-04T20:06:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of TheBloke/LongChat-13B-GPTQ

## Dataset Description

- Homepage: 
- Repository: URL
- Paper: 
- Leaderboard: URL
- Point of Contact: clementine@URL

### Dataset Summary

Dataset automatically created during the evaluation run of model TheBloke/LongChat-13B-GPTQ on the Open LLM Leaderboard.

The dataset is composed of 3 configurations, each one corresponding to one of the evaluated tasks.

The dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results.

An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).

To load the details from a run, you can for instance do the following:

## Latest results

These are the latest results from run 2023-11-04T20:06:00.633401 (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval):

### Supported Tasks and Leaderboards

### Languages

## Dataset Structure

### Data Instances

### Data Fields

### Data Splits

## Dataset Creation

### Curation Rationale

### Source Data

#### Initial Data Collection and Normalization

#### Who are the source language producers?

### Annotations

#### Annotation process

#### Who are the annotators?

### Personal and Sensitive Information

## Considerations for Using the Data

### Social Impact of Dataset

### Discussion of Biases

### Other Known Limitations

## Additional Information

### Dataset Curators

### Licensing Information

### Contributions
[ "# Dataset Card for Evaluation run of TheBloke/LongChat-13B-GPTQ", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model TheBloke/LongChat-13B-GPTQ on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configurations, each one corresponding to one of the evaluated tasks.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-11-04T20:06:00.633401 (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. You can find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of TheBloke/LongChat-13B-GPTQ", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model TheBloke/LongChat-13B-GPTQ on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configurations, each one corresponding to one of the evaluated tasks.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-11-04T20:06:00.633401 (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. You can find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 22, 31, 171, 67, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of TheBloke/LongChat-13B-GPTQ## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model TheBloke/LongChat-13B-GPTQ on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 2 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-11-04T20:06:00.633401(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
816593c47405818a7fcab394a1b774e6b82cbf8e
# Dataset Card for "spanish-legal-data-lite" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Ramitha/spanish-legal-data-lite
[ "region:us" ]
2023-11-02T22:01:55+00:00
{"dataset_info": {"features": [{"name": "Data", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 122971, "num_examples": 501}], "download_size": 62737, "dataset_size": 122971}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-03T15:47:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "spanish-legal-data-lite" More Information needed
[ "# Dataset Card for \"spanish-legal-data-lite\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"spanish-legal-data-lite\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"spanish-legal-data-lite\"\n\nMore Information needed" ]
e6539c262810e2c703a6601ce984c123845dcc9d
# Dataset Card for "named_math_formulas" Mathematical dataset based on 71 famous mathematical identities. Each entry consists of a name of the identity (*name*), a representation of that identity (*formula*), a label whether the representation belongs to the identity (*label*), and an id of the mathematical identity (*formula_name_id*). The false pairs are intentionally challenging, e.g., `a^2+2^b=c^2`as falsified version of the Pythagorean Theorem. All entries have been generated by using [data.json](data.json) as starting point and applying the randomizing and falsifying algorithms [here](https://github.com/jdrechsel13/sympy-random-LaTeX). The formulas in the dataset are not just pure mathematical, but contain also textual descriptions of the mathematical identity. At most 400000 versions are generated per identity. There are ten times more falsified versions than true ones, such that the dataset can be used for a training with changing false examples every epoch.
ddrg/named_math_formulas
[ "region:us" ]
2023-11-02T22:35:01+00:00
{"dataset_info": {"features": [{"name": "name", "dtype": "string"}, {"name": "formula", "dtype": "string"}, {"name": "formula_name_id", "dtype": "string"}, {"name": "label", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 4435850714, "num_examples": 21336656}, {"name": "test", "num_bytes": 492646856, "num_examples": 2370736}], "download_size": 1948482299, "dataset_size": 4928497570}}
2023-11-07T16:41:52+00:00
[]
[]
TAGS #region-us
# Dataset Card for "named_math_formulas" Mathematical dataset based on 71 famous mathematical identities. Each entry consists of a name of the identity (*name*), a representation of that identity (*formula*), a label whether the representation belongs to the identity (*label*), and an id of the mathematical identity (*formula_name_id*). The false pairs are intentionally challenging, e.g., 'a^2+2^b=c^2'as falsified version of the Pythagorean Theorem. All entries have been generated by using URL as starting point and applying the randomizing and falsifying algorithms here. The formulas in the dataset are not just pure mathematical, but contain also textual descriptions of the mathematical identity. At most 400000 versions are generated per identity. There are ten times more falsified versions than true ones, such that the dataset can be used for a training with changing false examples every epoch.
[ "# Dataset Card for \"named_math_formulas\"\n\nMathematical dataset based on 71 famous mathematical identities. Each entry consists of a name of the identity (*name*), a representation of that identity (*formula*), a label whether the representation belongs to the identity (*label*), and an id of the mathematical identity (*formula_name_id*). The false pairs are intentionally challenging, e.g., 'a^2+2^b=c^2'as falsified version of the Pythagorean Theorem. All entries have been generated by using URL as starting point and applying the randomizing and falsifying algorithms here. The formulas in the dataset are not just pure mathematical, but contain also textual descriptions of the mathematical identity. At most 400000 versions are generated per identity. There are ten times more falsified versions than true ones, such that the dataset can be used for a training with changing false examples every epoch." ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"named_math_formulas\"\n\nMathematical dataset based on 71 famous mathematical identities. Each entry consists of a name of the identity (*name*), a representation of that identity (*formula*), a label whether the representation belongs to the identity (*label*), and an id of the mathematical identity (*formula_name_id*). The false pairs are intentionally challenging, e.g., 'a^2+2^b=c^2'as falsified version of the Pythagorean Theorem. All entries have been generated by using URL as starting point and applying the randomizing and falsifying algorithms here. The formulas in the dataset are not just pure mathematical, but contain also textual descriptions of the mathematical identity. At most 400000 versions are generated per identity. There are ten times more falsified versions than true ones, such that the dataset can be used for a training with changing false examples every epoch." ]
[ 6, 229 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"named_math_formulas\"\n\nMathematical dataset based on 71 famous mathematical identities. Each entry consists of a name of the identity (*name*), a representation of that identity (*formula*), a label whether the representation belongs to the identity (*label*), and an id of the mathematical identity (*formula_name_id*). The false pairs are intentionally challenging, e.g., 'a^2+2^b=c^2'as falsified version of the Pythagorean Theorem. All entries have been generated by using URL as starting point and applying the randomizing and falsifying algorithms here. The formulas in the dataset are not just pure mathematical, but contain also textual descriptions of the mathematical identity. At most 400000 versions are generated per identity. There are ten times more falsified versions than true ones, such that the dataset can be used for a training with changing false examples every epoch." ]
005d3de5ba3cbd9fb5a51fb57ded6af4aa8b9993
# Dataset Card for "math_formula_retrieval" Mathematical dataset based on 71 famous mathematical identities. Each entry consists of two identities (in formula or textual form), together with a label, whether the two versions describe the same mathematical identity. The false pairs are not randomly chosen, but intentionally hard by modifying equivalent representations (see [ddrg/named_math_formulas](https://huggingface.co/datasets/ddrg/named_math_formulas) for more information). At most 400000 versions are generated per identity. There are ten times more falsified versions than true ones, such that the dataset can be used for a training with changing false examples every epoch.
ddrg/math_formula_retrieval
[ "region:us" ]
2023-11-02T22:41:41+00:00
{"dataset_info": {"features": [{"name": "formula1", "dtype": "string"}, {"name": "formula2", "dtype": "string"}, {"name": "label", "dtype": "bool"}, {"name": "formula1_name_id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 7285320882, "num_examples": 21348512}, {"name": "test", "num_bytes": 809630657, "num_examples": 2372048}], "download_size": 3656462517, "dataset_size": 8094951539}}
2023-11-15T21:52:50+00:00
[]
[]
TAGS #region-us
# Dataset Card for "math_formula_retrieval" Mathematical dataset based on 71 famous mathematical identities. Each entry consists of two identities (in formula or textual form), together with a label, whether the two versions describe the same mathematical identity. The false pairs are not randomly chosen, but intentionally hard by modifying equivalent representations (see ddrg/named_math_formulas for more information). At most 400000 versions are generated per identity. There are ten times more falsified versions than true ones, such that the dataset can be used for a training with changing false examples every epoch.
[ "# Dataset Card for \"math_formula_retrieval\"\n\nMathematical dataset based on 71 famous mathematical identities. Each entry consists of two identities (in formula or textual form), together with a label, whether the two versions describe the same mathematical identity. The false pairs are not randomly chosen, but intentionally hard by modifying equivalent representations (see ddrg/named_math_formulas for more information). At most 400000 versions are generated per identity. There are ten times more falsified versions than true ones, such that the dataset can be used for a training with changing false examples every epoch." ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"math_formula_retrieval\"\n\nMathematical dataset based on 71 famous mathematical identities. Each entry consists of two identities (in formula or textual form), together with a label, whether the two versions describe the same mathematical identity. The false pairs are not randomly chosen, but intentionally hard by modifying equivalent representations (see ddrg/named_math_formulas for more information). At most 400000 versions are generated per identity. There are ten times more falsified versions than true ones, such that the dataset can be used for a training with changing false examples every epoch." ]
[ 6, 149 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"math_formula_retrieval\"\n\nMathematical dataset based on 71 famous mathematical identities. Each entry consists of two identities (in formula or textual form), together with a label, whether the two versions describe the same mathematical identity. The false pairs are not randomly chosen, but intentionally hard by modifying equivalent representations (see ddrg/named_math_formulas for more information). At most 400000 versions are generated per identity. There are ten times more falsified versions than true ones, such that the dataset can be used for a training with changing false examples every epoch." ]
a1846f1dea88982b61dbe5eea107fbb2064d486a
The Brahe-Novels dataset is a collection of annotated novel excerpts in the public domain. It was originally created to train Brahe, an LLM fine-tuned for literary analysis. Most of the texts come from the Gutenberg project. The annotations include a mix of synthetic data and manual annotations. In accordance with the principles laid out by the US copyright office, all synthetic data and hybrid synthetic data are in the public domain as well.
Pclanglais/Brahe-Novels
[ "license:cc0-1.0", "region:us" ]
2023-11-02T23:04:43+00:00
{"license": "cc0-1.0"}
2023-11-02T23:11:30+00:00
[]
[]
TAGS #license-cc0-1.0 #region-us
The Brahe-Novels dataset is a collection of annotated novel excerpts in the public domain. It was originally created to train Brahe, an LLM fine-tuned for literary analysis. Most of the texts come from the Gutenberg project. The annotations include a mix of synthetic data and manual annotations. In accordance with the principles laid out by the US copyright office, all synthetic data and hybrid synthetic data are in the public domain as well.
[]
[ "TAGS\n#license-cc0-1.0 #region-us \n" ]
[ 14 ]
[ "passage: TAGS\n#license-cc0-1.0 #region-us \n" ]
6ffa55b4280fa592b145bc7c75b35d50e4171d70
# Dataset Card for "temario_data-wiki_results" Results of the model arthurmluz/ptt5-wikilingua-30epochs on the dataset godoyj/temario. 'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated. mean metrics: rouge= {'rouge1': 0.1419043757118125, 'rouge2': 0.046527868660995036, 'rougeL': 0.10439338473491701, 'rougeLsum': 0.10439338473491701} bert= {'precision': 0.7189946889877319, 'recall': 0.6085030937194824, 'f1': 0.6588504672050476} mover 0.5431404769938165
arthurmluz/temario_data-wiki_results
[ "region:us" ]
2023-11-02T23:06:26+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 205304, "num_examples": 25}], "download_size": 161820, "dataset_size": 205304}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T03:20:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "temario_data-wiki_results" Results of the model arthurmluz/ptt5-wikilingua-30epochs on the dataset godoyj/temario. 'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated. mean metrics: rouge= {'rouge1': 0.1419043757118125, 'rouge2': 0.046527868660995036, 'rougeL': 0.10439338473491701, 'rougeLsum': 0.10439338473491701} bert= {'precision': 0.7189946889877319, 'recall': 0.6085030937194824, 'f1': 0.6588504672050476} mover 0.5431404769938165
[ "# Dataset Card for \"temario_data-wiki_results\"\n\nResults of the model arthurmluz/ptt5-wikilingua-30epochs on the dataset godoyj/temario.\n\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.1419043757118125, 'rouge2': 0.046527868660995036, 'rougeL': 0.10439338473491701, 'rougeLsum': 0.10439338473491701}\n\nbert= {'precision': 0.7189946889877319, 'recall': 0.6085030937194824, 'f1': 0.6588504672050476}\n\nmover 0.5431404769938165" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"temario_data-wiki_results\"\n\nResults of the model arthurmluz/ptt5-wikilingua-30epochs on the dataset godoyj/temario.\n\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.1419043757118125, 'rouge2': 0.046527868660995036, 'rougeL': 0.10439338473491701, 'rougeLsum': 0.10439338473491701}\n\nbert= {'precision': 0.7189946889877319, 'recall': 0.6085030937194824, 'f1': 0.6588504672050476}\n\nmover 0.5431404769938165" ]
[ 6, 192 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"temario_data-wiki_results\"\n\nResults of the model arthurmluz/ptt5-wikilingua-30epochs on the dataset godoyj/temario.\n\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.1419043757118125, 'rouge2': 0.046527868660995036, 'rougeL': 0.10439338473491701, 'rougeLsum': 0.10439338473491701}\n\nbert= {'precision': 0.7189946889877319, 'recall': 0.6085030937194824, 'f1': 0.6588504672050476}\n\nmover 0.5431404769938165" ]
b9d7d86d6e8e876b8f3541516dfb38576c28a8d5
# Dataset Card for "temario_data-wiki_1024_results" Results of the model arthurmluz/ptt5-wikilingua-1024 on the dataset godoyj/temario. 'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated. mean metrics: rouge= {'rouge1': 0.1737841100453722, 'rouge2': 0.05694408293393681, 'rougeL': 0.12373628458017233, 'rougeLsum': 0.12373628458017233} bert= {'precision': 0.7249869775772094, 'recall': 0.620260682106018, 'f1': 0.6683329963684081} mover = 0.5512191986770616
arthurmluz/temario_data-wiki_1024_results
[ "region:us" ]
2023-11-02T23:11:26+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 206635, "num_examples": 25}], "download_size": 163078, "dataset_size": 206635}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T03:20:53+00:00
[]
[]
TAGS #region-us
# Dataset Card for "temario_data-wiki_1024_results" Results of the model arthurmluz/ptt5-wikilingua-1024 on the dataset godoyj/temario. 'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated. mean metrics: rouge= {'rouge1': 0.1737841100453722, 'rouge2': 0.05694408293393681, 'rougeL': 0.12373628458017233, 'rougeLsum': 0.12373628458017233} bert= {'precision': 0.7249869775772094, 'recall': 0.620260682106018, 'f1': 0.6683329963684081} mover = 0.5512191986770616
[ "# Dataset Card for \"temario_data-wiki_1024_results\"\n\nResults of the model arthurmluz/ptt5-wikilingua-1024 on the dataset godoyj/temario.\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.1737841100453722, 'rouge2': 0.05694408293393681, 'rougeL': 0.12373628458017233, 'rougeLsum': 0.12373628458017233}\n\nbert= {'precision': 0.7249869775772094, 'recall': 0.620260682106018, 'f1': 0.6683329963684081}\n\nmover = 0.5512191986770616" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"temario_data-wiki_1024_results\"\n\nResults of the model arthurmluz/ptt5-wikilingua-1024 on the dataset godoyj/temario.\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.1737841100453722, 'rouge2': 0.05694408293393681, 'rougeL': 0.12373628458017233, 'rougeLsum': 0.12373628458017233}\n\nbert= {'precision': 0.7249869775772094, 'recall': 0.620260682106018, 'f1': 0.6683329963684081}\n\nmover = 0.5512191986770616" ]
[ 6, 196 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"temario_data-wiki_1024_results\"\n\nResults of the model arthurmluz/ptt5-wikilingua-1024 on the dataset godoyj/temario.\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.1737841100453722, 'rouge2': 0.05694408293393681, 'rougeL': 0.12373628458017233, 'rougeLsum': 0.12373628458017233}\n\nbert= {'precision': 0.7249869775772094, 'recall': 0.620260682106018, 'f1': 0.6683329963684081}\n\nmover = 0.5512191986770616" ]
3652dccd6c939ad701c07bdbad98182cad06bf4c
Repackaging of HuggingFaceH4/ultrachat_200k
typeof/ultrachat-200k
[ "license:mit", "region:us" ]
2023-11-02T23:13:56+00:00
{"license": "mit"}
2023-11-02T23:19:00+00:00
[]
[]
TAGS #license-mit #region-us
Repackaging of HuggingFaceH4/ultrachat_200k
[]
[ "TAGS\n#license-mit #region-us \n" ]
[ 11 ]
[ "passage: TAGS\n#license-mit #region-us \n" ]
bbb0e7ae0e433df3b1c7638e776c7af17248d2eb
This is the ElektraGo FAQs dataset after applying data augmentation.

This repo has 3 versions of the dataset:

**1. Raw Dataset with Data Augmentation**
- ElektraGo_FAQs_Augmented.csv
- ElektraGo_FAQs-Augmented-en.json

**2. Dataset in Llama2 prompt format**
- ElektraGo_FAQs-Text-en.json
- ElektraGo_FAQs_Text.csv

**3. Dataset in Llama2 prompt format with system prompts**
- ElektraGo_FAQs_Text_SystemPrompts.csv
Fraternitas/ElektraGoFAQs-aug-text-en
[ "task_categories:question-answering", "size_categories:n<1K", "language:en", "region:us" ]
2023-11-02T23:15:05+00:00
{"language": ["en"], "size_categories": ["n<1K"], "task_categories": ["question-answering"]}
2023-11-02T23:44:39+00:00
[]
[ "en" ]
TAGS #task_categories-question-answering #size_categories-n<1K #language-English #region-us
This is the ElektraGo FAQs dataset after applying data augmentation.

This repo has 3 versions of the dataset:

1. Raw Dataset with Data Augmentation

- ElektraGo_FAQs_Augmented.csv
- ElektraGo_FAQs-URL

2. Dataset in Llama2 prompt format

- ElektraGo_FAQs-URL
- ElektraGo_FAQs_Text.csv

3. Dataset in Llama2 prompt format with system prompts

- ElektraGo_FAQs_Text_SystemPrompts.csv
[]
[ "TAGS\n#task_categories-question-answering #size_categories-n<1K #language-English #region-us \n" ]
[ 32 ]
[ "passage: TAGS\n#task_categories-question-answering #size_categories-n<1K #language-English #region-us \n" ]
a72442fdf871629c8396694ea5362434a08f5728
# Dataset Card for "contracts_v2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
paul-w-qs/contracts_v2
[ "region:us" ]
2023-11-02T23:16:10+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "N_ROWS", "dtype": "int64"}, {"name": "N_COLS", "dtype": "int64"}, {"name": "FONT_SIZE", "dtype": "int64"}, {"name": "FONT_NAME", "dtype": "string"}, {"name": "BORDER_THICKNESS", "dtype": "int64"}, {"name": "NOISED", "dtype": "bool"}, {"name": "LABEL_NOISE", "dtype": "bool"}, {"name": "JSON_LABEL", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 961858267.064, "num_examples": 11871}], "download_size": 947911506, "dataset_size": 961858267.064}}
2023-11-02T23:23:33+00:00
[]
[]
TAGS #region-us
# Dataset Card for "contracts_v2" More Information needed
[ "# Dataset Card for \"contracts_v2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"contracts_v2\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"contracts_v2\"\n\nMore Information needed" ]
0cb4cb943d55a6a39938f603639c64e4b9597fe4
# Dataset Card for "temario_data-wiki_temario_results" Results of the model arthurmluz/ptt5-wikilingua-temario on the dataset godoyj/temario. 'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated. mean metrics: rouge= {'rouge1': 0.49074908228642644, 'rouge2': 0.20170619333420284, 'rougeL': 0.300604099669828, 'rougeLsum': 0.300604099669828} bert= {'precision': 0.7447994518280029, 'recall': 0.7516166639328002, 'f1': 0.7480760717391968} mover = 0.6407881714739111
arthurmluz/temario_data-wiki_temario_results
[ "region:us" ]
2023-11-02T23:17:21+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 237696, "num_examples": 25}], "download_size": 190348, "dataset_size": 237696}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T03:24:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "temario_data-wiki_temario_results" Results of the model arthurmluz/ptt5-wikilingua-temario on the dataset godoyj/temario. 'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated. mean metrics: rouge= {'rouge1': 0.49074908228642644, 'rouge2': 0.20170619333420284, 'rougeL': 0.300604099669828, 'rougeLsum': 0.300604099669828} bert= {'precision': 0.7447994518280029, 'recall': 0.7516166639328002, 'f1': 0.7480760717391968} mover = 0.6407881714739111
[ "# Dataset Card for \"temario_data-wiki_temario_results\"\n\nResults of the model arthurmluz/ptt5-wikilingua-temario on the dataset godoyj/temario.\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.49074908228642644, 'rouge2': 0.20170619333420284, 'rougeL': 0.300604099669828, 'rougeLsum': 0.300604099669828}\n\nbert= {'precision': 0.7447994518280029, 'recall': 0.7516166639328002, 'f1': 0.7480760717391968}\n\nmover = 0.6407881714739111" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"temario_data-wiki_temario_results\"\n\nResults of the model arthurmluz/ptt5-wikilingua-temario on the dataset godoyj/temario.\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.49074908228642644, 'rouge2': 0.20170619333420284, 'rougeL': 0.300604099669828, 'rougeLsum': 0.300604099669828}\n\nbert= {'precision': 0.7447994518280029, 'recall': 0.7516166639328002, 'f1': 0.7480760717391968}\n\nmover = 0.6407881714739111" ]
[ 6, 199 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"temario_data-wiki_temario_results\"\n\nResults of the model arthurmluz/ptt5-wikilingua-temario on the dataset godoyj/temario.\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.49074908228642644, 'rouge2': 0.20170619333420284, 'rougeL': 0.300604099669828, 'rougeLsum': 0.300604099669828}\n\nbert= {'precision': 0.7447994518280029, 'recall': 0.7516166639328002, 'f1': 0.7480760717391968}\n\nmover = 0.6407881714739111" ]
6ac371460f1385397b17cd123a59f575a718d3f6
# Dataset Card for "temario_data-temario_results" Results of the model arthurmluz/ptt5-temario on the dataset godoyj/temario. 'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated. mean metrics: rouge= {'rouge1': 0.43610795130617214, 'rouge2': 0.1909123032482415, 'rougeL': 0.2712262438292888, 'rougeLsum': 0.2712262438292888} bert= {'precision': 0.7376330280303955, 'recall': 0.7247375249862671, 'f1': 0.7302069187164306} mover = 0.6236951679558348
arthurmluz/temario_data-temario_results
[ "region:us" ]
2023-11-02T23:28:54+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 232168, "num_examples": 25}], "download_size": 184320, "dataset_size": 232168}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T03:18:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "temario_data-temario_results" Results of the model arthurmluz/ptt5-temario on the dataset godoyj/temario. 'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated. mean metrics: rouge= {'rouge1': 0.43610795130617214, 'rouge2': 0.1909123032482415, 'rougeL': 0.2712262438292888, 'rougeLsum': 0.2712262438292888} bert= {'precision': 0.7376330280303955, 'recall': 0.7247375249862671, 'f1': 0.7302069187164306} mover = 0.6236951679558348
[ "# Dataset Card for \"temario_data-temario_results\"\n\n\nResults of the model arthurmluz/ptt5-temario on the dataset godoyj/temario.\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.43610795130617214, 'rouge2': 0.1909123032482415, 'rougeL': 0.2712262438292888, 'rougeLsum': 0.2712262438292888}\n\nbert= {'precision': 0.7376330280303955, 'recall': 0.7247375249862671, 'f1': 0.7302069187164306}\n\nmover = 0.6236951679558348" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"temario_data-temario_results\"\n\n\nResults of the model arthurmluz/ptt5-temario on the dataset godoyj/temario.\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.43610795130617214, 'rouge2': 0.1909123032482415, 'rougeL': 0.2712262438292888, 'rougeLsum': 0.2712262438292888}\n\nbert= {'precision': 0.7376330280303955, 'recall': 0.7247375249862671, 'f1': 0.7302069187164306}\n\nmover = 0.6236951679558348" ]
[ 6, 196 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"temario_data-temario_results\"\n\n\nResults of the model arthurmluz/ptt5-temario on the dataset godoyj/temario.\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.43610795130617214, 'rouge2': 0.1909123032482415, 'rougeL': 0.2712262438292888, 'rougeLsum': 0.2712262438292888}\n\nbert= {'precision': 0.7376330280303955, 'recall': 0.7247375249862671, 'f1': 0.7302069187164306}\n\nmover = 0.6236951679558348" ]
7166a8b8efc96a3919dd13ff45bc5ee0f96840bf
# Dataset Card for "temario_data-xlsumm_results" Results of the model arthurmluz/ptt5-xlsumm-30epochs on the dataset godoyj/temario. 'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated. mean metrics: rouge= {'rouge1': 0.13240213183008717, 'rouge2': 0.043402591111519936, 'rougeL': 0.09099180516319158, 'rougeLsum': 0.09099180516319158} bert= {'precision': 0.706232750415802, 'recall': 0.5906172156333923, 'f1': 0.643077917098999} mover = 0.5389906032463848
arthurmluz/temario_data-xlsum_results
[ "region:us" ]
2023-11-02T23:32:20+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 204994, "num_examples": 25}], "download_size": 161936, "dataset_size": 204994}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T03:28:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "temario_data-xlsumm_results" Results of the model arthurmluz/ptt5-xlsumm-30epochs on the dataset godoyj/temario. 'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated. mean metrics: rouge= {'rouge1': 0.13240213183008717, 'rouge2': 0.043402591111519936, 'rougeL': 0.09099180516319158, 'rougeLsum': 0.09099180516319158} bert= {'precision': 0.706232750415802, 'recall': 0.5906172156333923, 'f1': 0.643077917098999} mover = 0.5389906032463848
[ "# Dataset Card for \"temario_data-xlsumm_results\"\n\nResults of the model arthurmluz/ptt5-xlsumm-30epochs on the dataset godoyj/temario.\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.13240213183008717, 'rouge2': 0.043402591111519936, 'rougeL': 0.09099180516319158, 'rougeLsum': 0.09099180516319158}\n\nbert= {'precision': 0.706232750415802, 'recall': 0.5906172156333923, 'f1': 0.643077917098999}\n\nmover = 0.5389906032463848" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"temario_data-xlsumm_results\"\n\nResults of the model arthurmluz/ptt5-xlsumm-30epochs on the dataset godoyj/temario.\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.13240213183008717, 'rouge2': 0.043402591111519936, 'rougeL': 0.09099180516319158, 'rougeLsum': 0.09099180516319158}\n\nbert= {'precision': 0.706232750415802, 'recall': 0.5906172156333923, 'f1': 0.643077917098999}\n\nmover = 0.5389906032463848" ]
[ 6, 199 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"temario_data-xlsumm_results\"\n\nResults of the model arthurmluz/ptt5-xlsumm-30epochs on the dataset godoyj/temario.\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.13240213183008717, 'rouge2': 0.043402591111519936, 'rougeL': 0.09099180516319158, 'rougeLsum': 0.09099180516319158}\n\nbert= {'precision': 0.706232750415802, 'recall': 0.5906172156333923, 'f1': 0.643077917098999}\n\nmover = 0.5389906032463848" ]
56b1133845f543dc485cfe68bfde969e00e8671c
# Dataset Card for "temario_data-xlsumm_cstnews_results" Results of the model arthurmluz/ptt5-xlsumm-cstnews on the dataset godoyj/temario. 'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated. mean metrics: rouge= {'rouge1': 0.3989839955543582, 'rouge2': 0.1474448806452692, 'rougeL': 0.22985023944862537, 'rougeLsum': 0.22985023944862537} bert= {'precision': 0.7192043924331665, 'recall': 0.701116726398468, 'f1': 0.7095820474624633} mover = 0.6149315208622308
arthurmluz/temario_data-xlsum_cstnews_results
[ "region:us" ]
2023-11-02T23:52:13+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 225479, "num_examples": 25}], "download_size": 180649, "dataset_size": 225479}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T03:29:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "temario_data-xlsumm_cstnews_results" Results of the model arthurmluz/ptt5-xlsumm-cstnews on the dataset godoyj/temario. 'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated. mean metrics: rouge= {'rouge1': 0.3989839955543582, 'rouge2': 0.1474448806452692, 'rougeL': 0.22985023944862537, 'rougeLsum': 0.22985023944862537} bert= {'precision': 0.7192043924331665, 'recall': 0.701116726398468, 'f1': 0.7095820474624633} mover = 0.6149315208622308
[ "# Dataset Card for \"temario_data-xlsumm_cstnews_results\"\n\nResults of the model arthurmluz/ptt5-xlsumm-cstnews on the dataset godoyj/temario.\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.3989839955543582, 'rouge2': 0.1474448806452692, 'rougeL': 0.22985023944862537, 'rougeLsum': 0.22985023944862537}\n\nbert= {'precision': 0.7192043924331665, 'recall': 0.701116726398468, 'f1': 0.7095820474624633}\n\nmover = 0.6149315208622308" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"temario_data-xlsumm_cstnews_results\"\n\nResults of the model arthurmluz/ptt5-xlsumm-cstnews on the dataset godoyj/temario.\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.3989839955543582, 'rouge2': 0.1474448806452692, 'rougeL': 0.22985023944862537, 'rougeLsum': 0.22985023944862537}\n\nbert= {'precision': 0.7192043924331665, 'recall': 0.701116726398468, 'f1': 0.7095820474624633}\n\nmover = 0.6149315208622308" ]
[ 6, 202 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"temario_data-xlsumm_cstnews_results\"\n\nResults of the model arthurmluz/ptt5-xlsumm-cstnews on the dataset godoyj/temario.\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.3989839955543582, 'rouge2': 0.1474448806452692, 'rougeL': 0.22985023944862537, 'rougeLsum': 0.22985023944862537}\n\nbert= {'precision': 0.7192043924331665, 'recall': 0.701116726398468, 'f1': 0.7095820474624633}\n\nmover = 0.6149315208622308" ]
ee973c635aa7110fe9cb8a5622fa20a0dd70dbf7
# Dataset Card for "temario_data-wikilingua_cstnews_results" Results of the model arthurmluz/ptt5-wikilingua-cstnews on the dataset godoyj/temario. 'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated. mean metrics: rouge= {'rouge1': 0.3800757744192324, 'rouge2': 0.1539001654491066, 'rougeL': 0.2346540497659127, 'rougeLsum': 0.2346540497659127} bert= {'precision': 0.7361391615867615, 'recall': 0.6891939973831177, 'f1': 0.711702299118042} mover = 0.6075434818512242
arthurmluz/temario_data-wiki_cstnews_results
[ "region:us" ]
2023-11-02T23:55:12+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 219281, "num_examples": 25}], "download_size": 175736, "dataset_size": 219281}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T03:21:43+00:00
[]
[]
TAGS #region-us
# Dataset Card for "temario_data-wikilingua_cstnews_results" Results of the model arthurmluz/ptt5-wikilingua-cstnews on the dataset godoyj/temario. 'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated. mean metrics: rouge= {'rouge1': 0.3800757744192324, 'rouge2': 0.1539001654491066, 'rougeL': 0.2346540497659127, 'rougeLsum': 0.2346540497659127} bert= {'precision': 0.7361391615867615, 'recall': 0.6891939973831177, 'f1': 0.711702299118042} mover = 0.6075434818512242
[ "# Dataset Card for \"temario_data-wikilingua_cstnews_results\"\n\nResults of the model arthurmluz/ptt5-wikilingua-cstnews on the dataset godoyj/temario.\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.3800757744192324, 'rouge2': 0.1539001654491066, 'rougeL': 0.2346540497659127, 'rougeLsum': 0.2346540497659127}\n\nbert= {'precision': 0.7361391615867615, 'recall': 0.6891939973831177, 'f1': 0.711702299118042}\n\nmover = 0.6075434818512242" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"temario_data-wikilingua_cstnews_results\"\n\nResults of the model arthurmluz/ptt5-wikilingua-cstnews on the dataset godoyj/temario.\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.3800757744192324, 'rouge2': 0.1539001654491066, 'rougeL': 0.2346540497659127, 'rougeLsum': 0.2346540497659127}\n\nbert= {'precision': 0.7361391615867615, 'recall': 0.6891939973831177, 'f1': 0.711702299118042}\n\nmover = 0.6075434818512242" ]
[ 6, 199 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"temario_data-wikilingua_cstnews_results\"\n\nResults of the model arthurmluz/ptt5-wikilingua-cstnews on the dataset godoyj/temario.\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.3800757744192324, 'rouge2': 0.1539001654491066, 'rougeL': 0.2346540497659127, 'rougeLsum': 0.2346540497659127}\n\nbert= {'precision': 0.7361391615867615, 'recall': 0.6891939973831177, 'f1': 0.711702299118042}\n\nmover = 0.6075434818512242" ]
382b71e4d4908115bad2a6fca71f40fc9b89d9f5
# Dataset Card for "temario_data-wikilingua_cstnews_1024_results" Results of the model arthurmluz/ptt5-wikilingua-cstnews-1024 on the dataset godoyj/temario. 'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated. mean metrics: rouge= {'rouge1': 0.4019708009539416, 'rouge2': 0.1498883744908921, 'rougeL': 0.2357677592042992, 'rougeLsum': 0.2357677592042992} bert= {'precision': 0.7323308682441712, 'recall': 0.7016830992698669, 'f1': 0.7164008665084839} mover = 0.615330997959738
arthurmluz/temario_data-wiki_cstnews_1024_results
[ "region:us" ]
2023-11-03T00:00:31+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 223709, "num_examples": 25}], "download_size": 178816, "dataset_size": 223709}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T03:23:16+00:00
[]
[]
TAGS #region-us
# Dataset Card for "temario_data-wikilingua_cstnews_1024_results" Results of the model arthurmluz/ptt5-wikilingua-cstnews-1024 on the dataset godoyj/temario. 'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated. mean metrics: rouge= {'rouge1': 0.4019708009539416, 'rouge2': 0.1498883744908921, 'rougeL': 0.2357677592042992, 'rougeLsum': 0.2357677592042992} bert= {'precision': 0.7323308682441712, 'recall': 0.7016830992698669, 'f1': 0.7164008665084839} mover = 0.615330997959738
[ "# Dataset Card for \"temario_data-wikilingua_cstnews_1024_results\"\n\nResults of the model arthurmluz/ptt5-wikilingua-cstnews-1024 on the dataset godoyj/temario.\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.4019708009539416, 'rouge2': 0.1498883744908921, 'rougeL': 0.2357677592042992, 'rougeLsum': 0.2357677592042992}\n\nbert= {'precision': 0.7323308682441712, 'recall': 0.7016830992698669, 'f1': 0.7164008665084839}\n\nmover = 0.615330997959738" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"temario_data-wikilingua_cstnews_1024_results\"\n\nResults of the model arthurmluz/ptt5-wikilingua-cstnews-1024 on the dataset godoyj/temario.\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.4019708009539416, 'rouge2': 0.1498883744908921, 'rougeL': 0.2357677592042992, 'rougeLsum': 0.2357677592042992}\n\nbert= {'precision': 0.7323308682441712, 'recall': 0.7016830992698669, 'f1': 0.7164008665084839}\n\nmover = 0.615330997959738" ]
[ 6, 206 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"temario_data-wikilingua_cstnews_1024_results\"\n\nResults of the model arthurmluz/ptt5-wikilingua-cstnews-1024 on the dataset godoyj/temario.\n\n'gen_summary' is the generated summary, and both bertScore and Rouge metrics calculated.\n\nmean metrics:\n\nrouge= {'rouge1': 0.4019708009539416, 'rouge2': 0.1498883744908921, 'rougeL': 0.2357677592042992, 'rougeLsum': 0.2357677592042992}\n\nbert= {'precision': 0.7323308682441712, 'recall': 0.7016830992698669, 'f1': 0.7164008665084839}\n\nmover = 0.615330997959738" ]
a1fdbcf5a8340213d0f6a76e04f6486c38751564
# Dataset Card for "temario_data-xlsumm_cstnews_1024_results" rouge= {'rouge1': 0.3842309401085634, 'rouge2': 0.15048098740220198, 'rougeL': 0.2290304095965295, 'rougeLsum': 0.2290304095965295} bert= {'precision': 0.7249824571609497, 'recall': 0.6954835605621338, 'f1': 0.7095399975776673} mover = 0.6115396799603942
arthurmluz/temario_data-xlsum_cstnews_1024_results
[ "region:us" ]
2023-11-03T00:06:28+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 223671, "num_examples": 25}], "download_size": 178850, "dataset_size": 223671}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T03:32:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "temario_data-xlsumm_cstnews_1024_results" rouge= {'rouge1': 0.3842309401085634, 'rouge2': 0.15048098740220198, 'rougeL': 0.2290304095965295, 'rougeLsum': 0.2290304095965295} bert= {'precision': 0.7249824571609497, 'recall': 0.6954835605621338, 'f1': 0.7095399975776673} mover = 0.6115396799603942
[ "# Dataset Card for \"temario_data-xlsumm_cstnews_1024_results\"\n\nrouge= {'rouge1': 0.3842309401085634, 'rouge2': 0.15048098740220198, 'rougeL': 0.2290304095965295, 'rougeLsum': 0.2290304095965295}\n\nbert= {'precision': 0.7249824571609497, 'recall': 0.6954835605621338, 'f1': 0.7095399975776673}\n\nmover = 0.6115396799603942" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"temario_data-xlsumm_cstnews_1024_results\"\n\nrouge= {'rouge1': 0.3842309401085634, 'rouge2': 0.15048098740220198, 'rougeL': 0.2290304095965295, 'rougeLsum': 0.2290304095965295}\n\nbert= {'precision': 0.7249824571609497, 'recall': 0.6954835605621338, 'f1': 0.7095399975776673}\n\nmover = 0.6115396799603942" ]
[ 6, 143 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"temario_data-xlsumm_cstnews_1024_results\"\n\nrouge= {'rouge1': 0.3842309401085634, 'rouge2': 0.15048098740220198, 'rougeL': 0.2290304095965295, 'rougeLsum': 0.2290304095965295}\n\nbert= {'precision': 0.7249824571609497, 'recall': 0.6954835605621338, 'f1': 0.7095399975776673}\n\nmover = 0.6115396799603942" ]
7238681833c5347ab9b0e07d873da2c00d5245fa
# Dataset Card for "GPTextSum_data-wiki_results" rouge= {'rouge1': 0.22777331974906403, 'rouge2': 0.08345419898242161, 'rougeL': 0.17833325012462153, 'rougeLsum': 0.17833325012462153} bert= {'precision': 0.7432332336902618, 'recall': 0.6663061320781708, 'f1': 0.7022046685218811}
arthurmluz/GPTextSum_data-wiki_results
[ "region:us" ]
2023-11-03T00:41:06+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 25888, "num_examples": 20}], "download_size": 33075, "dataset_size": 25888}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T04:01:36+00:00
[]
[]
TAGS #region-us
# Dataset Card for "GPTextSum_data-wiki_results" rouge= {'rouge1': 0.22777331974906403, 'rouge2': 0.08345419898242161, 'rougeL': 0.17833325012462153, 'rougeLsum': 0.17833325012462153} bert= {'precision': 0.7432332336902618, 'recall': 0.6663061320781708, 'f1': 0.7022046685218811}
[ "# Dataset Card for \"GPTextSum_data-wiki_results\"\n\nrouge= {'rouge1': 0.22777331974906403, 'rouge2': 0.08345419898242161, 'rougeL': 0.17833325012462153, 'rougeLsum': 0.17833325012462153}\n\nbert= {'precision': 0.7432332336902618, 'recall': 0.6663061320781708, 'f1': 0.7022046685218811}" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"GPTextSum_data-wiki_results\"\n\nrouge= {'rouge1': 0.22777331974906403, 'rouge2': 0.08345419898242161, 'rougeL': 0.17833325012462153, 'rougeLsum': 0.17833325012462153}\n\nbert= {'precision': 0.7432332336902618, 'recall': 0.6663061320781708, 'f1': 0.7022046685218811}" ]
[ 6, 127 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"GPTextSum_data-wiki_results\"\n\nrouge= {'rouge1': 0.22777331974906403, 'rouge2': 0.08345419898242161, 'rougeL': 0.17833325012462153, 'rougeLsum': 0.17833325012462153}\n\nbert= {'precision': 0.7432332336902618, 'recall': 0.6663061320781708, 'f1': 0.7022046685218811}" ]
df7871c71856260e79018fec851a798d292cf3f6
# Dataset Card for "GPTextSum_data-wiki_1024_results" rouge= {'rouge1': 0.20436494957206813, 'rouge2': 0.06669792477248418, 'rougeL': 0.1645584797463879, 'rougeLsum': 0.1645584797463879} bert= {'precision': 0.7313757807016372, 'recall': 0.6589481264352799, 'f1': 0.6928485721349716}
arthurmluz/GPTextSum_data-wiki_1024_results
[ "region:us" ]
2023-11-03T00:42:10+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 25941, "num_examples": 20}], "download_size": 32992, "dataset_size": 25941}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T04:01:42+00:00
[]
[]
TAGS #region-us
# Dataset Card for "GPTextSum_data-wiki_1024_results" rouge= {'rouge1': 0.20436494957206813, 'rouge2': 0.06669792477248418, 'rougeL': 0.1645584797463879, 'rougeLsum': 0.1645584797463879} bert= {'precision': 0.7313757807016372, 'recall': 0.6589481264352799, 'f1': 0.6928485721349716}
[ "# Dataset Card for \"GPTextSum_data-wiki_1024_results\"\n\nrouge= {'rouge1': 0.20436494957206813, 'rouge2': 0.06669792477248418, 'rougeL': 0.1645584797463879, 'rougeLsum': 0.1645584797463879}\n\nbert= {'precision': 0.7313757807016372, 'recall': 0.6589481264352799, 'f1': 0.6928485721349716}" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"GPTextSum_data-wiki_1024_results\"\n\nrouge= {'rouge1': 0.20436494957206813, 'rouge2': 0.06669792477248418, 'rougeL': 0.1645584797463879, 'rougeLsum': 0.1645584797463879}\n\nbert= {'precision': 0.7313757807016372, 'recall': 0.6589481264352799, 'f1': 0.6928485721349716}" ]
[ 6, 129 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"GPTextSum_data-wiki_1024_results\"\n\nrouge= {'rouge1': 0.20436494957206813, 'rouge2': 0.06669792477248418, 'rougeL': 0.1645584797463879, 'rougeLsum': 0.1645584797463879}\n\nbert= {'precision': 0.7313757807016372, 'recall': 0.6589481264352799, 'f1': 0.6928485721349716}" ]
25fadeb131c0a231c446fe536c59fefc34b9d7a7
# Dataset Card for "GPTextSum_data-wiki_cstnews_results" rouge= {'rouge1': 0.448765925729676, 'rouge2': 0.22185620300242942, 'rougeL': 0.334266564181735, 'rougeLsum': 0.334266564181735} bert= {'precision': 0.7489787250757217, 'recall': 0.7784000992774963, 'f1': 0.7628451943397522}
arthurmluz/GPTextSum_data-wiki_cstnews_results
[ "region:us" ]
2023-11-03T00:43:38+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 31520, "num_examples": 20}], "download_size": 39539, "dataset_size": 31520}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T04:05:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for "GPTextSum_data-wiki_cstnews_results" rouge= {'rouge1': 0.448765925729676, 'rouge2': 0.22185620300242942, 'rougeL': 0.334266564181735, 'rougeLsum': 0.334266564181735} bert= {'precision': 0.7489787250757217, 'recall': 0.7784000992774963, 'f1': 0.7628451943397522}
[ "# Dataset Card for \"GPTextSum_data-wiki_cstnews_results\"\n\nrouge= {'rouge1': 0.448765925729676, 'rouge2': 0.22185620300242942, 'rougeL': 0.334266564181735, 'rougeLsum': 0.334266564181735}\n\nbert= {'precision': 0.7489787250757217, 'recall': 0.7784000992774963, 'f1': 0.7628451943397522}" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"GPTextSum_data-wiki_cstnews_results\"\n\nrouge= {'rouge1': 0.448765925729676, 'rouge2': 0.22185620300242942, 'rougeL': 0.334266564181735, 'rougeLsum': 0.334266564181735}\n\nbert= {'precision': 0.7489787250757217, 'recall': 0.7784000992774963, 'f1': 0.7628451943397522}" ]
[ 6, 131 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"GPTextSum_data-wiki_cstnews_results\"\n\nrouge= {'rouge1': 0.448765925729676, 'rouge2': 0.22185620300242942, 'rougeL': 0.334266564181735, 'rougeLsum': 0.334266564181735}\n\nbert= {'precision': 0.7489787250757217, 'recall': 0.7784000992774963, 'f1': 0.7628451943397522}" ]
1b2cd9cf66c918980bc7020ffe414c6a3c3b7187
# Dataset Card for "GPTextSum_data-wiki_cstnews_1024_results" rouge= {'rouge1': 0.4308100446730652, 'rouge2': 0.21805946047970845, 'rougeL': 0.32857244937346486, 'rougeLsum': 0.32857244937346486} bert= {'precision': 0.7545565485954284, 'recall': 0.7717844933271408, 'f1': 0.762201464176178}
arthurmluz/GPTextSum_data-wiki_cstnews_1024_results
[ "region:us" ]
2023-11-03T00:45:45+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 30882, "num_examples": 20}], "download_size": 39589, "dataset_size": 30882}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T04:05:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "GPTextSum_data-wiki_cstnews_1024_results" rouge= {'rouge1': 0.4308100446730652, 'rouge2': 0.21805946047970845, 'rougeL': 0.32857244937346486, 'rougeLsum': 0.32857244937346486} bert= {'precision': 0.7545565485954284, 'recall': 0.7717844933271408, 'f1': 0.762201464176178}
[ "# Dataset Card for \"GPTextSum_data-wiki_cstnews_1024_results\"\n\nrouge= {'rouge1': 0.4308100446730652, 'rouge2': 0.21805946047970845, 'rougeL': 0.32857244937346486, 'rougeLsum': 0.32857244937346486}\n\nbert= {'precision': 0.7545565485954284, 'recall': 0.7717844933271408, 'f1': 0.762201464176178}" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"GPTextSum_data-wiki_cstnews_1024_results\"\n\nrouge= {'rouge1': 0.4308100446730652, 'rouge2': 0.21805946047970845, 'rougeL': 0.32857244937346486, 'rougeLsum': 0.32857244937346486}\n\nbert= {'precision': 0.7545565485954284, 'recall': 0.7717844933271408, 'f1': 0.762201464176178}" ]
[ 6, 134 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"GPTextSum_data-wiki_cstnews_1024_results\"\n\nrouge= {'rouge1': 0.4308100446730652, 'rouge2': 0.21805946047970845, 'rougeL': 0.32857244937346486, 'rougeLsum': 0.32857244937346486}\n\nbert= {'precision': 0.7545565485954284, 'recall': 0.7717844933271408, 'f1': 0.762201464176178}" ]
26677d074e856901394c9f06cbc265d0b3345976
# Dataset Card for "GPTextSum_data-wiki_temario_results" rouge= {'rouge1': 0.4138915416727407, 'rouge2': 0.21239574315097523, 'rougeL': 0.32579088959468294, 'rougeLsum': 0.32579088959468294} bert= {'precision': 0.7588523745536804, 'recall': 0.7835794538259506, 'f1': 0.769540649652481}
arthurmluz/GPTextSum_data-wiki_temario_results
[ "region:us" ]
2023-11-03T00:46:53+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 31786, "num_examples": 20}], "download_size": 40596, "dataset_size": 31786}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T04:05:33+00:00
[]
[]
TAGS #region-us
# Dataset Card for "GPTextSum_data-wiki_temario_results" rouge= {'rouge1': 0.4138915416727407, 'rouge2': 0.21239574315097523, 'rougeL': 0.32579088959468294, 'rougeLsum': 0.32579088959468294} bert= {'precision': 0.7588523745536804, 'recall': 0.7835794538259506, 'f1': 0.769540649652481}
[ "# Dataset Card for \"GPTextSum_data-wiki_temario_results\"\n\nrouge= {'rouge1': 0.4138915416727407, 'rouge2': 0.21239574315097523, 'rougeL': 0.32579088959468294, 'rougeLsum': 0.32579088959468294}\n\nbert= {'precision': 0.7588523745536804, 'recall': 0.7835794538259506, 'f1': 0.769540649652481}" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"GPTextSum_data-wiki_temario_results\"\n\nrouge= {'rouge1': 0.4138915416727407, 'rouge2': 0.21239574315097523, 'rougeL': 0.32579088959468294, 'rougeLsum': 0.32579088959468294}\n\nbert= {'precision': 0.7588523745536804, 'recall': 0.7835794538259506, 'f1': 0.769540649652481}" ]
[ 6, 131 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"GPTextSum_data-wiki_temario_results\"\n\nrouge= {'rouge1': 0.4138915416727407, 'rouge2': 0.21239574315097523, 'rougeL': 0.32579088959468294, 'rougeLsum': 0.32579088959468294}\n\nbert= {'precision': 0.7588523745536804, 'recall': 0.7835794538259506, 'f1': 0.769540649652481}" ]
f7573559f16f40c4346493f625355c306984c94a
# Dataset Card for "GPTextSum_data-temario_results" rouge= {'rouge1': 0.3521895422836724, 'rouge2': 0.18278167550878366, 'rougeL': 0.27857021634712387, 'rougeLsum': 0.27857021634712387} bert= {'precision': 0.700176528096199, 'recall': 0.8076501220464707, 'f1': 0.7497184872627258}
arthurmluz/GPTextSum_data-temario_results
[ "region:us" ]
2023-11-03T00:48:11+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 38584, "num_examples": 20}], "download_size": 47319, "dataset_size": 38584}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T04:01:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "GPTextSum_data-temario_results" rouge= {'rouge1': 0.3521895422836724, 'rouge2': 0.18278167550878366, 'rougeL': 0.27857021634712387, 'rougeLsum': 0.27857021634712387} bert= {'precision': 0.700176528096199, 'recall': 0.8076501220464707, 'f1': 0.7497184872627258}
[ "# Dataset Card for \"GPTextSum_data-temario_results\"\n\nrouge= {'rouge1': 0.3521895422836724, 'rouge2': 0.18278167550878366, 'rougeL': 0.27857021634712387, 'rougeLsum': 0.27857021634712387}\n\nbert= {'precision': 0.700176528096199, 'recall': 0.8076501220464707, 'f1': 0.7497184872627258}" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"GPTextSum_data-temario_results\"\n\nrouge= {'rouge1': 0.3521895422836724, 'rouge2': 0.18278167550878366, 'rougeL': 0.27857021634712387, 'rougeLsum': 0.27857021634712387}\n\nbert= {'precision': 0.700176528096199, 'recall': 0.8076501220464707, 'f1': 0.7497184872627258}" ]
[ 6, 127 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"GPTextSum_data-temario_results\"\n\nrouge= {'rouge1': 0.3521895422836724, 'rouge2': 0.18278167550878366, 'rougeL': 0.27857021634712387, 'rougeLsum': 0.27857021634712387}\n\nbert= {'precision': 0.700176528096199, 'recall': 0.8076501220464707, 'f1': 0.7497184872627258}" ]
80c0e8fea601e05e3956b6d7643cb2f598751eec
# Dataset Card for "GPTextSum_data-xlsumm_results" rouge= {'rouge1': 0.3029093047862411, 'rouge2': 0.0986410948279747, 'rougeL': 0.20919963393917018, 'rougeLsum': 0.20919963393917018} bert= {'precision': 0.7287724256515503, 'recall': 0.6897789388895035, 'f1': 0.7082490384578705}
arthurmluz/GPTextSum_data-xlsum_results
[ "region:us" ]
2023-11-03T00:49:12+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 27108, "num_examples": 20}], "download_size": 34394, "dataset_size": 27108}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T04:10:35+00:00
[]
[]
TAGS #region-us
# Dataset Card for "GPTextSum_data-xlsumm_results" rouge= {'rouge1': 0.3029093047862411, 'rouge2': 0.0986410948279747, 'rougeL': 0.20919963393917018, 'rougeLsum': 0.20919963393917018} bert= {'precision': 0.7287724256515503, 'recall': 0.6897789388895035, 'f1': 0.7082490384578705}
[ "# Dataset Card for \"GPTextSum_data-xlsumm_results\"\n\nrouge= {'rouge1': 0.3029093047862411, 'rouge2': 0.0986410948279747, 'rougeL': 0.20919963393917018, 'rougeLsum': 0.20919963393917018}\n\nbert= {'precision': 0.7287724256515503, 'recall': 0.6897789388895035, 'f1': 0.7082490384578705}" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"GPTextSum_data-xlsumm_results\"\n\nrouge= {'rouge1': 0.3029093047862411, 'rouge2': 0.0986410948279747, 'rougeL': 0.20919963393917018, 'rougeLsum': 0.20919963393917018}\n\nbert= {'precision': 0.7287724256515503, 'recall': 0.6897789388895035, 'f1': 0.7082490384578705}" ]
[ 6, 129 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"GPTextSum_data-xlsumm_results\"\n\nrouge= {'rouge1': 0.3029093047862411, 'rouge2': 0.0986410948279747, 'rougeL': 0.20919963393917018, 'rougeLsum': 0.20919963393917018}\n\nbert= {'precision': 0.7287724256515503, 'recall': 0.6897789388895035, 'f1': 0.7082490384578705}" ]
64c9014fb28b07c4ab209438442b610a54e962b5
# Dataset Card for "GPTextSum_data-xlsumm_cstnews_1024_results" rouge= {'rouge1': 0.42328596861870976, 'rouge2': 0.20509969476992213, 'rougeL': 0.32990453663237673, 'rougeLsum': 0.32990453663237673} bert= {'precision': 0.7561025887727737, 'recall': 0.7690637379884719, 'f1': 0.7619699746370315}
arthurmluz/GPTextSum_data-xlsum_cstnews_1024_results
[ "region:us" ]
2023-11-03T00:50:22+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 30182, "num_examples": 20}], "download_size": 37156, "dataset_size": 30182}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T04:10:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "GPTextSum_data-xlsumm_cstnews_1024_results" rouge= {'rouge1': 0.42328596861870976, 'rouge2': 0.20509969476992213, 'rougeL': 0.32990453663237673, 'rougeLsum': 0.32990453663237673} bert= {'precision': 0.7561025887727737, 'recall': 0.7690637379884719, 'f1': 0.7619699746370315}
[ "# Dataset Card for \"GPTextSum_data-xlsumm_cstnews_1024_results\"\n\nrouge= {'rouge1': 0.42328596861870976, 'rouge2': 0.20509969476992213, 'rougeL': 0.32990453663237673, 'rougeLsum': 0.32990453663237673}\n\nbert= {'precision': 0.7561025887727737, 'recall': 0.7690637379884719, 'f1': 0.7619699746370315}" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"GPTextSum_data-xlsumm_cstnews_1024_results\"\n\nrouge= {'rouge1': 0.42328596861870976, 'rouge2': 0.20509969476992213, 'rougeL': 0.32990453663237673, 'rougeLsum': 0.32990453663237673}\n\nbert= {'precision': 0.7561025887727737, 'recall': 0.7690637379884719, 'f1': 0.7619699746370315}" ]
[ 6, 139 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"GPTextSum_data-xlsumm_cstnews_1024_results\"\n\nrouge= {'rouge1': 0.42328596861870976, 'rouge2': 0.20509969476992213, 'rougeL': 0.32990453663237673, 'rougeLsum': 0.32990453663237673}\n\nbert= {'precision': 0.7561025887727737, 'recall': 0.7690637379884719, 'f1': 0.7619699746370315}" ]
9ec16f35f4f699caabf2a1b05cf6134784917f02
# Dataset Card for "GPTextSum_data-xlsumm_cstnews_results" rouge= {'rouge1': 0.4382657703207724, 'rouge2': 0.21548980809200038, 'rougeL': 0.3415963857072833, 'rougeLsum': 0.3415963857072833} bert= {'precision': 0.75693099796772, 'recall': 0.7692080974578858, 'f1': 0.7624350398778915}
arthurmluz/GPTextSum_data-xlsum_cstnews_results
[ "region:us" ]
2023-11-03T00:51:23+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 30499, "num_examples": 20}], "download_size": 37935, "dataset_size": 30499}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T04:10:40+00:00
[]
[]
TAGS #region-us
# Dataset Card for "GPTextSum_data-xlsumm_cstnews_results" rouge= {'rouge1': 0.4382657703207724, 'rouge2': 0.21548980809200038, 'rougeL': 0.3415963857072833, 'rougeLsum': 0.3415963857072833} bert= {'precision': 0.75693099796772, 'recall': 0.7692080974578858, 'f1': 0.7624350398778915}
[ "# Dataset Card for \"GPTextSum_data-xlsumm_cstnews_results\"\n\nrouge= {'rouge1': 0.4382657703207724, 'rouge2': 0.21548980809200038, 'rougeL': 0.3415963857072833, 'rougeLsum': 0.3415963857072833}\n\nbert= {'precision': 0.75693099796772, 'recall': 0.7692080974578858, 'f1': 0.7624350398778915}" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"GPTextSum_data-xlsumm_cstnews_results\"\n\nrouge= {'rouge1': 0.4382657703207724, 'rouge2': 0.21548980809200038, 'rougeL': 0.3415963857072833, 'rougeLsum': 0.3415963857072833}\n\nbert= {'precision': 0.75693099796772, 'recall': 0.7692080974578858, 'f1': 0.7624350398778915}" ]
[ 6, 133 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"GPTextSum_data-xlsumm_cstnews_results\"\n\nrouge= {'rouge1': 0.4382657703207724, 'rouge2': 0.21548980809200038, 'rougeL': 0.3415963857072833, 'rougeLsum': 0.3415963857072833}\n\nbert= {'precision': 0.75693099796772, 'recall': 0.7692080974578858, 'f1': 0.7624350398778915}" ]
305f7e046f32e52a2b9867df481607541d091a2a
# Dataset Card for "cstnews_data-wiki_results" rouge= {'rouge1': 0.2655324963122997, 'rouge2': 0.11389233951370548, 'rougeL': 0.18758958356070038, 'rougeLsum': 0.18758958356070038} bert= {'precision': 0.7567696012556553, 'recall': 0.6739303469657898, 'f1': 0.7126890532672405} mover = 0.5746077032702939
arthurmluz/cstnews_data-wiki_results
[ "region:us" ]
2023-11-03T00:53:35+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 50786, "num_examples": 16}], "download_size": 0, "dataset_size": 50786}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T03:59:43+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cstnews_data-wiki_results" rouge= {'rouge1': 0.2655324963122997, 'rouge2': 0.11389233951370548, 'rougeL': 0.18758958356070038, 'rougeLsum': 0.18758958356070038} bert= {'precision': 0.7567696012556553, 'recall': 0.6739303469657898, 'f1': 0.7126890532672405} mover = 0.5746077032702939
[ "# Dataset Card for \"cstnews_data-wiki_results\"\n\nrouge= {'rouge1': 0.2655324963122997, 'rouge2': 0.11389233951370548, 'rougeL': 0.18758958356070038, 'rougeLsum': 0.18758958356070038}\n\nbert= {'precision': 0.7567696012556553, 'recall': 0.6739303469657898, 'f1': 0.7126890532672405}\n\nmover = 0.5746077032702939" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cstnews_data-wiki_results\"\n\nrouge= {'rouge1': 0.2655324963122997, 'rouge2': 0.11389233951370548, 'rougeL': 0.18758958356070038, 'rougeLsum': 0.18758958356070038}\n\nbert= {'precision': 0.7567696012556553, 'recall': 0.6739303469657898, 'f1': 0.7126890532672405}\n\nmover = 0.5746077032702939" ]
[ 6, 139 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cstnews_data-wiki_results\"\n\nrouge= {'rouge1': 0.2655324963122997, 'rouge2': 0.11389233951370548, 'rougeL': 0.18758958356070038, 'rougeLsum': 0.18758958356070038}\n\nbert= {'precision': 0.7567696012556553, 'recall': 0.6739303469657898, 'f1': 0.7126890532672405}\n\nmover = 0.5746077032702939" ]
2c5ebe5a16643d623e172f4e6439d54f878074c4
# Dataset Card for "cstnews_data-wiki_1024_results" rouge= {'rouge1': 0.2556160962870545, 'rouge2': 0.10105330295297661, 'rougeL': 0.18616276144956143, 'rougeLsum': 0.18616276144956143} bert= {'precision': 0.7447284124791622, 'recall': 0.6668070293962955, 'f1': 0.7032350450754166} mover = 0.5737413741663241
arthurmluz/cstnews_data-wiki_1024_results
[ "region:us" ]
2023-11-03T00:55:11+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 50681, "num_examples": 16}], "download_size": 0, "dataset_size": 50681}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T03:59:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cstnews_data-wiki_1024_results" rouge= {'rouge1': 0.2556160962870545, 'rouge2': 0.10105330295297661, 'rougeL': 0.18616276144956143, 'rougeLsum': 0.18616276144956143} bert= {'precision': 0.7447284124791622, 'recall': 0.6668070293962955, 'f1': 0.7032350450754166} mover = 0.5737413741663241
[ "# Dataset Card for \"cstnews_data-wiki_1024_results\"\n\nrouge= {'rouge1': 0.2556160962870545, 'rouge2': 0.10105330295297661, 'rougeL': 0.18616276144956143, 'rougeLsum': 0.18616276144956143}\n\nbert= {'precision': 0.7447284124791622, 'recall': 0.6668070293962955, 'f1': 0.7032350450754166}\n\nmover = 0.5737413741663241" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cstnews_data-wiki_1024_results\"\n\nrouge= {'rouge1': 0.2556160962870545, 'rouge2': 0.10105330295297661, 'rougeL': 0.18616276144956143, 'rougeLsum': 0.18616276144956143}\n\nbert= {'precision': 0.7447284124791622, 'recall': 0.6668070293962955, 'f1': 0.7032350450754166}\n\nmover = 0.5737413741663241" ]
[ 6, 140 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cstnews_data-wiki_1024_results\"\n\nrouge= {'rouge1': 0.2556160962870545, 'rouge2': 0.10105330295297661, 'rougeL': 0.18616276144956143, 'rougeLsum': 0.18616276144956143}\n\nbert= {'precision': 0.7447284124791622, 'recall': 0.6668070293962955, 'f1': 0.7032350450754166}\n\nmover = 0.5737413741663241" ]
68ae89273eaf7aa5d75d11fb124123346ade26e5
# Dataset Card for "cstnews_data-wiki_cstnews_results" rouge= {'rouge1': 0.6051775604734266, 'rouge2': 0.40902323115473077, 'rougeL': 0.46508195128423335, 'rougeLsum': 0.46508195128423335} bert= {'precision': 0.8185752630233765, 'recall': 0.8162486664950848, 'f1': 0.8168921731412411} mover = 0.6600971841852161
arthurmluz/cstnews_data-wiki_cstnews_results
[ "region:us" ]
2023-11-03T00:56:34+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 57183, "num_examples": 16}], "download_size": 54414, "dataset_size": 57183}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T03:47:16+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cstnews_data-wiki_cstnews_results" rouge= {'rouge1': 0.6051775604734266, 'rouge2': 0.40902323115473077, 'rougeL': 0.46508195128423335, 'rougeLsum': 0.46508195128423335} bert= {'precision': 0.8185752630233765, 'recall': 0.8162486664950848, 'f1': 0.8168921731412411} mover = 0.6600971841852161
[ "# Dataset Card for \"cstnews_data-wiki_cstnews_results\"\n\nrouge= {'rouge1': 0.6051775604734266, 'rouge2': 0.40902323115473077, 'rougeL': 0.46508195128423335, 'rougeLsum': 0.46508195128423335}\n\nbert= {'precision': 0.8185752630233765, 'recall': 0.8162486664950848, 'f1': 0.8168921731412411}\n\nmover = 0.6600971841852161" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cstnews_data-wiki_cstnews_results\"\n\nrouge= {'rouge1': 0.6051775604734266, 'rouge2': 0.40902323115473077, 'rougeL': 0.46508195128423335, 'rougeLsum': 0.46508195128423335}\n\nbert= {'precision': 0.8185752630233765, 'recall': 0.8162486664950848, 'f1': 0.8168921731412411}\n\nmover = 0.6600971841852161" ]
[ 6, 146 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cstnews_data-wiki_cstnews_results\"\n\nrouge= {'rouge1': 0.6051775604734266, 'rouge2': 0.40902323115473077, 'rougeL': 0.46508195128423335, 'rougeLsum': 0.46508195128423335}\n\nbert= {'precision': 0.8185752630233765, 'recall': 0.8162486664950848, 'f1': 0.8168921731412411}\n\nmover = 0.6600971841852161" ]
1310933b827eb4d4da448af8c7d75846f54fea8d
# Dataset Card for "cstnews_data-wiki_cstnews_1024_results" rouge= {'rouge1': 0.6132183984247445, 'rouge2': 0.41647613950003065, 'rougeL': 0.4765044905867061, 'rougeLsum': 0.4765044905867061} bert= {'precision': 0.8172679543495178, 'recall': 0.816291406750679, 'f1': 0.8162719868123531} mover = 0.6605804820730224
arthurmluz/cstnews_data-wiki_cstnews_1024_results
[ "region:us" ]
2023-11-03T01:00:13+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 57448, "num_examples": 16}], "download_size": 54346, "dataset_size": 57448}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T03:48:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cstnews_data-wiki_cstnews_1024_results" rouge= {'rouge1': 0.6132183984247445, 'rouge2': 0.41647613950003065, 'rougeL': 0.4765044905867061, 'rougeLsum': 0.4765044905867061} bert= {'precision': 0.8172679543495178, 'recall': 0.816291406750679, 'f1': 0.8162719868123531} mover = 0.6605804820730224
[ "# Dataset Card for \"cstnews_data-wiki_cstnews_1024_results\"\n\nrouge= {'rouge1': 0.6132183984247445, 'rouge2': 0.41647613950003065, 'rougeL': 0.4765044905867061, 'rougeLsum': 0.4765044905867061}\n\nbert= {'precision': 0.8172679543495178, 'recall': 0.816291406750679, 'f1': 0.8162719868123531}\n\nmover = 0.6605804820730224" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cstnews_data-wiki_cstnews_1024_results\"\n\nrouge= {'rouge1': 0.6132183984247445, 'rouge2': 0.41647613950003065, 'rougeL': 0.4765044905867061, 'rougeLsum': 0.4765044905867061}\n\nbert= {'precision': 0.8172679543495178, 'recall': 0.816291406750679, 'f1': 0.8162719868123531}\n\nmover = 0.6605804820730224" ]
[ 6, 145 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cstnews_data-wiki_cstnews_1024_results\"\n\nrouge= {'rouge1': 0.6132183984247445, 'rouge2': 0.41647613950003065, 'rougeL': 0.4765044905867061, 'rougeLsum': 0.4765044905867061}\n\nbert= {'precision': 0.8172679543495178, 'recall': 0.816291406750679, 'f1': 0.8162719868123531}\n\nmover = 0.6605804820730224" ]
267e519d3a0e977d2ae85729660111336e4f70e8
# Dataset Card for race ## Licensing Information http://www.cs.cmu.edu/~glai1/data/race/ 1. RACE dataset is available for non-commercial research purposes only. 2. All passages are obtained from the Internet which is not property of Carnegie Mellon University. We are not responsible for the content nor the meaning of these passages. 3. You agree not to reproduce, duplicate, copy, sell, trade, resell or exploit for any commercial purpose, any portion of the contexts and any portion of derived data. 4. We reserve the right to terminate your access to the RACE dataset at any time. ## Source Data Citation Information ``` @inproceedings{lai-etal-2017-race, title = "{RACE}: Large-scale {R}e{A}ding Comprehension Dataset From Examinations", author = "Lai, Guokun and Xie, Qizhe and Liu, Hanxiao and Yang, Yiming and Hovy, Eduard", booktitle = "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", month = sep, year = "2017", address = "Copenhagen, Denmark", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/D17-1082", doi = "10.18653/v1/D17-1082", pages = "785--794", } ```
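A minimal usage sketch, assuming the repo id above loads directly with the `datasets` library (field names are taken from the dataset_info metadata in this card):

```python
from datasets import load_dataset

# Load the Korean RACE validation split and inspect one
# multiple-choice example (fields per the dataset_info above).
ds = load_dataset("KETI-AIR/kor_race", split="validation")
ex = ds[0]
print(ex["question"])
for i, opt in enumerate(ex["options"]):
    print(f"  ({chr(ord('A') + i)}) {opt}")
print("answer:", ex["answer"])
```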
KETI-AIR/kor_race
[ "task_categories:multiple-choice", "task_ids:multiple-choice-qa", "size_categories:1K<n<10K", "language:ko", "license:other", "region:us" ]
2023-11-03T01:02:06+00:00
{"language": ["ko"], "license": "other", "size_categories": ["1K<n<10K"], "task_categories": ["multiple-choice"], "task_ids": ["multiple-choice-qa"], "pretty_name": "race", "license_name": "race", "license_link": "https://www.cs.cmu.edu/~glai1/data/race/", "dataset_info": {"features": [{"name": "data_index_by_user", "dtype": "int32"}, {"name": "article", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "options", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 191129599, "num_examples": 87866}, {"name": "validation", "num_bytes": 10507580, "num_examples": 4887}, {"name": "test", "num_bytes": 10668488, "num_examples": 4934}], "download_size": 46954865, "dataset_size": 212305667}}
2023-11-15T01:13:23+00:00
[]
[ "ko" ]
TAGS #task_categories-multiple-choice #task_ids-multiple-choice-qa #size_categories-1K<n<10K #language-Korean #license-other #region-us
# Dataset Card for race ## Licensing Information URL 1. RACE dataset is available for non-commercial research purposes only. 2. All passages are obtained from the Internet which is not property of Carnegie Mellon University. We are not responsible for the content nor the meaning of these passages. 3. You agree not to reproduce, duplicate, copy, sell, trade, resell or exploit for any commercial purpose, any portion of the contexts and any portion of derived data. 4. We reserve the right to terminate your access to the RACE dataset at any time. ## Source Data Citation Information
[ "# Dataset Card for race", "## Licensing Information\n\nURL\n\n1. RACE dataset is available for non-commercial research purpose only.\n\n2. All passages are obtained from the Internet which is not property of Carnegie Mellon University. We are not responsible for the content nor the meaning of these passages.\n\n3. You agree not to reproduce, duplicate, copy, sell, trade, resell or exploit for any commercial purpose, any portion of the contexts and any portion of derived data.\n\n4. We reserve the right to terminate your access to the RACE dataset at any time.", "## Source Data Citation INformation" ]
[ "TAGS\n#task_categories-multiple-choice #task_ids-multiple-choice-qa #size_categories-1K<n<10K #language-Korean #license-other #region-us \n", "# Dataset Card for race", "## Licensing Information\n\nURL\n\n1. RACE dataset is available for non-commercial research purpose only.\n\n2. All passages are obtained from the Internet which is not property of Carnegie Mellon University. We are not responsible for the content nor the meaning of these passages.\n\n3. You agree not to reproduce, duplicate, copy, sell, trade, resell or exploit for any commercial purpose, any portion of the contexts and any portion of derived data.\n\n4. We reserve the right to terminate your access to the RACE dataset at any time.", "## Source Data Citation INformation" ]
[ 53, 6, 119, 8 ]
[ "passage: TAGS\n#task_categories-multiple-choice #task_ids-multiple-choice-qa #size_categories-1K<n<10K #language-Korean #license-other #region-us \n# Dataset Card for race## Licensing Information\n\nURL\n\n1. RACE dataset is available for non-commercial research purpose only.\n\n2. All passages are obtained from the Internet which is not property of Carnegie Mellon University. We are not responsible for the content nor the meaning of these passages.\n\n3. You agree not to reproduce, duplicate, copy, sell, trade, resell or exploit for any commercial purpose, any portion of the contexts and any portion of derived data.\n\n4. We reserve the right to terminate your access to the RACE dataset at any time.## Source Data Citation INformation" ]
6ea31e59f0d508b9cce053370a8bf018c66a7839
# Dataset Card for "cstnews_data-temario_results" rouge= {'rouge1': 0.5207584715132082, 'rouge2': 0.34711381882009107, 'rougeL': 0.38095639884621346, 'rougeLsum': 0.38095639884621346} bert= {'precision': 0.7428307943046093, 'recall': 0.8364794515073299, 'f1': 0.7866528294980526} mover = 0.6287250343090405
arthurmluz/cstnews_data-temario_results
[ "region:us" ]
2023-11-03T01:02:45+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 69932, "num_examples": 16}], "download_size": 0, "dataset_size": 69932}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T04:01:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cstnews_data-temario_results" rouge= {'rouge1': 0.5207584715132082, 'rouge2': 0.34711381882009107, 'rougeL': 0.38095639884621346, 'rougeLsum': 0.38095639884621346} bert= {'precision': 0.7428307943046093, 'recall': 0.8364794515073299, 'f1': 0.7866528294980526} mover = 0.6287250343090405
[ "# Dataset Card for \"cstnews_data-temario_results\"\n\n\nrouge= {'rouge1': 0.5207584715132082, 'rouge2': 0.34711381882009107, 'rougeL': 0.38095639884621346, 'rougeLsum': 0.38095639884621346}\n\nbert= {'precision': 0.7428307943046093, 'recall': 0.8364794515073299, 'f1': 0.7866528294980526}\n\nmover = 0.6287250343090405" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cstnews_data-temario_results\"\n\n\nrouge= {'rouge1': 0.5207584715132082, 'rouge2': 0.34711381882009107, 'rougeL': 0.38095639884621346, 'rougeLsum': 0.38095639884621346}\n\nbert= {'precision': 0.7428307943046093, 'recall': 0.8364794515073299, 'f1': 0.7866528294980526}\n\nmover = 0.6287250343090405" ]
[ 6, 139 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cstnews_data-temario_results\"\n\n\nrouge= {'rouge1': 0.5207584715132082, 'rouge2': 0.34711381882009107, 'rougeL': 0.38095639884621346, 'rougeLsum': 0.38095639884621346}\n\nbert= {'precision': 0.7428307943046093, 'recall': 0.8364794515073299, 'f1': 0.7866528294980526}\n\nmover = 0.6287250343090405" ]
578b29091cce76ec1b464db1bb76cd37c4e9a7bf
Contributors: Nicolas Mejia Petit # Vezora's CodeTester Dataset ![License](https://img.shields.io/badge/license-Apache%202.0-blue.svg) ## Introduction Today, on November 2, 2023, we are excited to release our internal Python dataset with 22,600 examples of code. These examples have been meticulously tested and verified as working. Our dataset was created using a script we developed. ### Dataset Creation - Our script operates by extracting Python code from the output section of Alpaca-formatted datasets. It tests each extracted piece of code, keeping it if it passes and removing it if it fails, then saves all the working code in a separate dataset. - Our second script works by removing the non-working code from your Alpaca datasets, saving it to a JSON file of non-working code, and then keeping all the working examples, along with any other non-Python examples, and saving them. - !WARNING! This script runs on your local computer, with multithreading so it runs fast. If there is any malicious Python code in your dataset, it WILL run on your local computer, so either run it in a VM or don't sift through shady datasets. Lastly, you must have the relevant Python packages installed: mostly common ones that most users already have, but also some, like tkinter, that certain lines of code need in order to be tested. - (If you are struggling to convert your dataset to Alpaca format, give the first three questions of both datasets to ChatGPT or Bing and ask for a script that converts the dataset to the format you want. It might take one or two tries.) - The creation of this dataset involved leveraging open source datasets from various sources, including Wizard-LM's Evol datasets, CodeUp's 19k, Sahils2801's Code Alpaca, Eric Hartford's Dolphin, and a selection of hand-prompted GPT-4 code questions. The resulting dataset was carefully deduplicated. - We discovered that many of the open source datasets contained thousands of non-functional code examples, often plagued by module errors and other issues. Importantly, our script's approach is highly adaptable and could potentially be used to test code in other languages such as C++, C, SQL, and more. ### Usage Guidelines We invested a significant amount of time in developing this script. If you intend to use it to extract functional code in your own projects or datasets, or plan on using our dataset, please include the following attribution in your model's or dataset's repository: "Filtered Using Vezora's CodeTester" ## Motivation We are releasing our internal tool thanks to OpenChat 3.5's recognition of its foundational model limitations, particularly in tasks related to code. ### Limitations of Foundational Models It's essential to note that even when writing syntactically correct code, foundational models often lack access to up-to-date Python and API documentation. As a result, code generated by these models may contain errors stemming from outdated calls or methods. ## Building a Strong Python Code Model If you aspire to build a robust Python code model, we recommend the following steps: 1. Pretrain with Mistral 7b on up-to-date Python and API documentation. (During our testing we found that even when a model writes syntactically correct code, it often lacks up-to-date API calls and functions.) 2. Consider incorporating programming textbooks into your training. 3. Fine-tune your model with our dataset using SFT (Supervised Fine-Tuning).
In the future, we may also release our "not working" code dataset, allowing users to train a Direct Preference Optimization (DPO) model to reward functional code over non-functional code, although with the second script provided it would be fairly easy to do this yourself. We hope this dataset serves as a valuable resource for the community and contributes to the improvement of code-related AI models. As for why there are some references to 188k: we had used a script to count the examples in the dataset without realizing it wasn't meant for Alpaca datasets, so it counted the examples incorrectly. This dataset therefore contains "only" 22k examples of functioning Python code. However, we will soon release a better coding dataset that people will be very happy with, containing over 220,000 examples of code (only tested for Python code, but containing many other languages). I will also be releasing 13k examples of non-working code, for the purpose of DPO datasets or RLHF.
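To make the filtering idea concrete, the sketch below shows one way such a tester could work. It is not the authors' released script: the regex, file names, timeout, and pass/fail rule are illustrative assumptions, and, per the warning above, it executes dataset code on your machine, so run it only inside a VM or container.

```python
import json
import re
import subprocess
import sys
import tempfile

# Fenced Python blocks inside an Alpaca record's "output" field.
CODE_RE = re.compile(r"```(?:python)?\s*\n(.*?)```", re.DOTALL)

def runs_ok(code: str, timeout: int = 10) -> bool:
    """Execute one snippet in a subprocess and report success."""
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
        f.write(code)
        path = f.name
    try:
        proc = subprocess.run([sys.executable, path],
                              capture_output=True, timeout=timeout)
        return proc.returncode == 0
    except subprocess.TimeoutExpired:
        return False

with open("alpaca_dataset.json") as f:  # hypothetical input file
    records = json.load(f)

working, broken = [], []
for rec in records:
    blocks = CODE_RE.findall(rec.get("output", ""))
    # Records with no Python blocks are kept, mirroring the second
    # script's behaviour of preserving non-Python examples.
    if not blocks or all(runs_ok(b) for b in blocks):
        working.append(rec)
    else:
        broken.append(rec)

with open("working.json", "w") as f:
    json.dump(working, f, indent=2)
with open("not_working.json", "w") as f:
    json.dump(broken, f, indent=2)
```

A real version would also want multithreading, a sandboxed interpreter, and preinstalled packages (e.g. tkinter) so that imports rather than missing modules decide pass/fail.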
Vezora/Tested-22k-Python-Alpaca
[ "license:apache-2.0", "region:us" ]
2023-11-03T01:06:45+00:00
{"license": "apache-2.0"}
2023-12-26T00:02:32+00:00
[]
[]
TAGS #license-apache-2.0 #region-us
Contributors: Nicolas Mejia Petit # Vezora's CodeTester Dataset !License ## Introduction Today, on November 2, 2023, we are excited to release our internal Python dataset with 22,600 examples of code. These examples have been meticulously tested and verified as working. Our dataset was created using a script we developed. ### Dataset Creation - Our script operates by extracting Python code from the output section of Alpaca-formatted datasets. It tests each extracted piece of code, keeping it if it passes and removing it if it fails, then saves all the working code in a separate dataset. - Our second script works by removing the non-working code from your Alpaca datasets, saving it to a JSON file of non-working code, and then keeping all the working examples, along with any other non-Python examples, and saving them. - !WARNING! This script runs on your local computer, with multithreading so it runs fast. If there is any malicious Python code in your dataset, it WILL run on your local computer, so either run it in a VM or don't sift through shady datasets. Lastly, you must have the relevant Python packages installed: mostly common ones that most users already have, but also some, like tkinter, that certain lines of code need in order to be tested. - (If you are struggling to convert your dataset to Alpaca format, give the first three questions of both datasets to ChatGPT or Bing and ask for a script that converts the dataset to the format you want. It might take one or two tries.) - The creation of this dataset involved leveraging open source datasets from various sources, including Wizard-LM's Evol datasets, CodeUp's 19k, Sahils2801's Code Alpaca, Eric Hartford's Dolphin, and a selection of hand-prompted GPT-4 code questions. The resulting dataset was carefully deduplicated. - We discovered that many of the open source datasets contained thousands of non-functional code examples, often plagued by module errors and other issues. Importantly, our script's approach is highly adaptable and could potentially be used to test code in other languages such as C++, C, SQL, and more. ### Usage Guidelines We invested a significant amount of time in developing this script. If you intend to use it to extract functional code in your own projects or datasets, or plan on using our dataset, please include the following attribution in your model's or dataset's repository: "Filtered Using Vezora's CodeTester" ## Motivation We are releasing our internal tool thanks to OpenChat 3.5's recognition of its foundational model limitations, particularly in tasks related to code. ### Limitations of Foundational Models It's essential to note that even when writing syntactically correct code, foundational models often lack access to up-to-date Python and API documentation. As a result, code generated by these models may contain errors stemming from outdated calls or methods. ## Building a Strong Python Code Model If you aspire to build a robust Python code model, we recommend the following steps: 1. Pretrain with Mistral 7b on up-to-date Python and API documentation. (During our testing we found that even when a model writes syntactically correct code, it often lacks up-to-date API calls and functions.) 2. Consider incorporating programming textbooks into your training. 3. Fine-tune your model with our dataset using SFT (Supervised Fine-Tuning).
In the future, we may also release our "not working" code dataset, allowing users to train a Direct Preference Optimization (DPO) model to reward functional code over non-functional code, although with the second script provided it would be fairly easy to do this yourself. We hope this dataset serves as a valuable resource for the community and contributes to the improvement of code-related AI models. As for why there are some references to 188k: we had used a script to count the examples in the dataset without realizing it wasn't meant for Alpaca datasets, so it counted the examples incorrectly. This dataset therefore contains "only" 22k examples of functioning Python code. However, we will soon release a better coding dataset that people will be very happy with, containing over 220,000 examples of code (only tested for Python code, but containing many other languages). I will also be releasing 13k examples of non-working code, for the purpose of DPO datasets or RLHF.
[ "# Vezora's CodeTester Dataset\n\n!License", "## Introduction\n\nToday, on November 2, 2023, we are excited to release our internal Python dataset with 22,600 examples of code. These examples have been meticulously tested and verified as working. Our dataset was created using a script we developed.", "### Dataset Creation\n\n- Our script operates by extracting Python code from the output section of Alpaca-formatted datasets. It tests each extracted piece of code, keeping it if it passes and removing it if it fails, then saves all the working code in a seperate dataset.\n- Our second script works by removing the not working code from your alpaca datasets, and saves it to a not working code json, and then keeps all the working examples along with any other non python related examples, and saves it.\n- !WARNING! this script does run on ypur local computer, with mutithreading so it runs fast, if there is any malicious python code in your dataset, it WILL run on your local computer so either run in a VM or don't sift through shady datasets. Lastly, it is required that you have python packages installed, just main ones most would have already installed but some like tkinter and other packages in order for certain lines of code to be tested.\n- (if you are struggling converting your dataset to alpaca format, give the first three questions of both datasets and ask chat gpt or bing to give you a script to convert the dataset to that format you want. Might take one or two tries.)\n- The creation of this dataset involved leveraging open source datasets from various sources, including Wizard-LM's Evol datasets, CodeUp's 19k, Sahils2801's Code Alpaca, Eric Heartford's Dolphin, and a selection of hand-prompted GPT-4 code questions. The resulting dataset was carefully deduplicated.\n- We discovered that many of the open source datasets contained thousands of non-functional code examples, often plagued by module errors and other issues. Importantly, our script's approach is highly adaptable and could potentially be used to test code in other languages such as C++, C, SQL, and more.", "### Usage Guidelines\n\nWe invested a significant amount of time in developing this script. If you intend to use it to extract functional code in your own projects or datasets, and or plan on using our dataset, please include the following attribution in your model's or dataset's repository:\n\n\"Filtered Using Vezora's CodeTester\"", "## Motivation\n\nWe are releasing our internal tool thanks to Open Chat 3.5's recognition of its foundational model limitations, particularly in tasks related to code.", "### Limitations of Foundational Models\n\nIt's essential to note that even when writing syntactically correct code, foundational models often lack access to up-to-date Python and API documentation. As a result, code generated by these models may contain errors stemming from outdated calls or methods.", "## Building a Strong Python Code Model\n\nIf you aspire to build a robust Python code model, we recommend the following steps:\n\n1. Pretrain with Mistral 7b on UPTODATE Python and API documentations. (during our testing we found even when a model writes syntactyically correct code it lacks up to date api calls and functions.)\n2. Consider incorporating programming textbooks into your training.\n3. 
Fine-tune your model with our dataset using SFT (Supervised Fine-Tuning).\n\nIn the future, we may also release our \"not working\" code dataset, allowing users to create a Discriminative Pretraining Objective (DPO) model to reward functional code over non-functional code. Although with the second script provided, it would be pretty easy to do it your self.\n\nWe hope this dataset serves as a valuable resource for the community and contributes to the improvement of code-related AI models.\n\nWhy there are some references to 188k, we had used a script to count the examples in the dataset, and not realized the script wasn't meant to alpaca datasets, so it counted the examples wrong. Therefore, this is \"only\" 22k of functioning python code examples.\nHowever we are soon to release a better coding dataset, people will be even very happy with, containing over 220,000 examples of code (only tested for python code but contains many other languages.)\nI will also be releasing 13k examples of not working code, for the purpose of a DPO datasets, or RLHF." ]
[ "TAGS\n#license-apache-2.0 #region-us \n", "# Vezora's CodeTester Dataset\n\n!License", "## Introduction\n\nToday, on November 2, 2023, we are excited to release our internal Python dataset with 22,600 examples of code. These examples have been meticulously tested and verified as working. Our dataset was created using a script we developed.", "### Dataset Creation\n\n- Our script operates by extracting Python code from the output section of Alpaca-formatted datasets. It tests each extracted piece of code, keeping it if it passes and removing it if it fails, then saves all the working code in a seperate dataset.\n- Our second script works by removing the not working code from your alpaca datasets, and saves it to a not working code json, and then keeps all the working examples along with any other non python related examples, and saves it.\n- !WARNING! this script does run on ypur local computer, with mutithreading so it runs fast, if there is any malicious python code in your dataset, it WILL run on your local computer so either run in a VM or don't sift through shady datasets. Lastly, it is required that you have python packages installed, just main ones most would have already installed but some like tkinter and other packages in order for certain lines of code to be tested.\n- (if you are struggling converting your dataset to alpaca format, give the first three questions of both datasets and ask chat gpt or bing to give you a script to convert the dataset to that format you want. Might take one or two tries.)\n- The creation of this dataset involved leveraging open source datasets from various sources, including Wizard-LM's Evol datasets, CodeUp's 19k, Sahils2801's Code Alpaca, Eric Heartford's Dolphin, and a selection of hand-prompted GPT-4 code questions. The resulting dataset was carefully deduplicated.\n- We discovered that many of the open source datasets contained thousands of non-functional code examples, often plagued by module errors and other issues. Importantly, our script's approach is highly adaptable and could potentially be used to test code in other languages such as C++, C, SQL, and more.", "### Usage Guidelines\n\nWe invested a significant amount of time in developing this script. If you intend to use it to extract functional code in your own projects or datasets, and or plan on using our dataset, please include the following attribution in your model's or dataset's repository:\n\n\"Filtered Using Vezora's CodeTester\"", "## Motivation\n\nWe are releasing our internal tool thanks to Open Chat 3.5's recognition of its foundational model limitations, particularly in tasks related to code.", "### Limitations of Foundational Models\n\nIt's essential to note that even when writing syntactically correct code, foundational models often lack access to up-to-date Python and API documentation. As a result, code generated by these models may contain errors stemming from outdated calls or methods.", "## Building a Strong Python Code Model\n\nIf you aspire to build a robust Python code model, we recommend the following steps:\n\n1. Pretrain with Mistral 7b on UPTODATE Python and API documentations. (during our testing we found even when a model writes syntactyically correct code it lacks up to date api calls and functions.)\n2. Consider incorporating programming textbooks into your training.\n3. 
Fine-tune your model with our dataset using SFT (Supervised Fine-Tuning).\n\nIn the future, we may also release our \"not working\" code dataset, allowing users to create a Discriminative Pretraining Objective (DPO) model to reward functional code over non-functional code. Although with the second script provided, it would be pretty easy to do it your self.\n\nWe hope this dataset serves as a valuable resource for the community and contributes to the improvement of code-related AI models.\n\nWhy there are some references to 188k, we had used a script to count the examples in the dataset, and not realized the script wasn't meant to alpaca datasets, so it counted the examples wrong. Therefore, this is \"only\" 22k of functioning python code examples.\nHowever we are soon to release a better coding dataset, people will be even very happy with, containing over 220,000 examples of code (only tested for python code but contains many other languages.)\nI will also be releasing 13k examples of not working code, for the purpose of a DPO datasets, or RLHF." ]
[ 14, 14, 56, 458, 82, 34, 68, 353 ]
[ "passage: TAGS\n#license-apache-2.0 #region-us \n# Vezora's CodeTester Dataset\n\n!License## Introduction\n\nToday, on November 2, 2023, we are excited to release our internal Python dataset with 22,600 examples of code. These examples have been meticulously tested and verified as working. Our dataset was created using a script we developed.", "passage: ### Dataset Creation\n\n- Our script operates by extracting Python code from the output section of Alpaca-formatted datasets. It tests each extracted piece of code, keeping it if it passes and removing it if it fails, then saves all the working code in a seperate dataset.\n- Our second script works by removing the not working code from your alpaca datasets, and saves it to a not working code json, and then keeps all the working examples along with any other non python related examples, and saves it.\n- !WARNING! this script does run on ypur local computer, with mutithreading so it runs fast, if there is any malicious python code in your dataset, it WILL run on your local computer so either run in a VM or don't sift through shady datasets. Lastly, it is required that you have python packages installed, just main ones most would have already installed but some like tkinter and other packages in order for certain lines of code to be tested.\n- (if you are struggling converting your dataset to alpaca format, give the first three questions of both datasets and ask chat gpt or bing to give you a script to convert the dataset to that format you want. Might take one or two tries.)\n- The creation of this dataset involved leveraging open source datasets from various sources, including Wizard-LM's Evol datasets, CodeUp's 19k, Sahils2801's Code Alpaca, Eric Heartford's Dolphin, and a selection of hand-prompted GPT-4 code questions. The resulting dataset was carefully deduplicated.\n- We discovered that many of the open source datasets contained thousands of non-functional code examples, often plagued by module errors and other issues. Importantly, our script's approach is highly adaptable and could potentially be used to test code in other languages such as C++, C, SQL, and more.### Usage Guidelines\n\nWe invested a significant amount of time in developing this script. If you intend to use it to extract functional code in your own projects or datasets, and or plan on using our dataset, please include the following attribution in your model's or dataset's repository:\n\n\"Filtered Using Vezora's CodeTester\"## Motivation\n\nWe are releasing our internal tool thanks to Open Chat 3.5's recognition of its foundational model limitations, particularly in tasks related to code.### Limitations of Foundational Models\n\nIt's essential to note that even when writing syntactically correct code, foundational models often lack access to up-to-date Python and API documentation. As a result, code generated by these models may contain errors stemming from outdated calls or methods." ]
5589febabff7ee50c0c04c72514cf83e41b94938
# Dataset Card for "cstnews_data-wiki_temario_results" rouge= {'rouge1': 0.5525303131963668, 'rouge2': 0.3319831382549481, 'rougeL': 0.40901822666642607, 'rougeLsum': 0.40901822666642607} bert= {'precision': 0.782619547098875, 'recall': 0.8086873143911362, 'f1': 0.7947092242538929} mover = 0.6412309719553281
arthurmluz/cstnews_data-wiki_temario_results
[ "region:us" ]
2023-11-03T01:08:51+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 61374, "num_examples": 16}], "download_size": 58330, "dataset_size": 61374}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T03:49:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cstnews_data-wiki_temario_results" rouge= {'rouge1': 0.5525303131963668, 'rouge2': 0.3319831382549481, 'rougeL': 0.40901822666642607, 'rougeLsum': 0.40901822666642607} bert= {'precision': 0.782619547098875, 'recall': 0.8086873143911362, 'f1': 0.7947092242538929} mover = 0.6412309719553281
[ "# Dataset Card for \"cstnews_data-wiki_temario_results\"\n\nrouge= {'rouge1': 0.5525303131963668, 'rouge2': 0.3319831382549481, 'rougeL': 0.40901822666642607, 'rougeLsum': 0.40901822666642607}\n\nbert= {'precision': 0.782619547098875, 'recall': 0.8086873143911362, 'f1': 0.7947092242538929}\n\nmover = 0.6412309719553281" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cstnews_data-wiki_temario_results\"\n\nrouge= {'rouge1': 0.5525303131963668, 'rouge2': 0.3319831382549481, 'rougeL': 0.40901822666642607, 'rougeLsum': 0.40901822666642607}\n\nbert= {'precision': 0.782619547098875, 'recall': 0.8086873143911362, 'f1': 0.7947092242538929}\n\nmover = 0.6412309719553281" ]
[ 6, 140 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cstnews_data-wiki_temario_results\"\n\nrouge= {'rouge1': 0.5525303131963668, 'rouge2': 0.3319831382549481, 'rougeL': 0.40901822666642607, 'rougeLsum': 0.40901822666642607}\n\nbert= {'precision': 0.782619547098875, 'recall': 0.8086873143911362, 'f1': 0.7947092242538929}\n\nmover = 0.6412309719553281" ]
972075c9b55bc93f0b9ee72f0797202ecc656744
# Dataset Card for "SDv2-GPT4Spatial-200" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Doub7e/SDv2-GPT4Spatial-200
[ "region:us" ]
2023-11-03T01:09:17+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "prompt", "dtype": "string"}, {"name": "T5_last_hidden_states", "sequence": {"sequence": {"sequence": "float32"}}}], "splits": [{"name": "train", "num_bytes": 203072791.0, "num_examples": 200}], "download_size": 204322556, "dataset_size": 203072791.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-03T01:26:37+00:00
[]
[]
TAGS #region-us
# Dataset Card for "SDv2-GPT4Spatial-200" More Information needed
[ "# Dataset Card for \"SDv2-GPT4Spatial-200\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"SDv2-GPT4Spatial-200\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"SDv2-GPT4Spatial-200\"\n\nMore Information needed" ]
2c91f85e1f7e8a0cfb14a1e28653287329d8a35a
# Dataset Card for "cstnews_data-xlsumm_results" rouge= {'rouge1': 0.24238643847479127, 'rouge2': 0.09767172698855643, 'rougeL': 0.17246288464026294, 'rougeLsum': 0.17246288464026294} bert= {'precision': 0.7629447244107723, 'recall': 0.6678285673260689, 'f1': 0.710793524980545} mover = 0.5670640524432061
arthurmluz/cstnews_data-xlsum_results
[ "region:us" ]
2023-11-03T01:10:29+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 50481, "num_examples": 16}], "download_size": 48161, "dataset_size": 50481}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T03:51:41+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cstnews_data-xlsumm_results" rouge= {'rouge1': 0.24238643847479127, 'rouge2': 0.09767172698855643, 'rougeL': 0.17246288464026294, 'rougeLsum': 0.17246288464026294} bert= {'precision': 0.7629447244107723, 'recall': 0.6678285673260689, 'f1': 0.710793524980545} mover = 0.5670640524432061
[ "# Dataset Card for \"cstnews_data-xlsumm_results\"\n\nrouge= {'rouge1': 0.24238643847479127, 'rouge2': 0.09767172698855643, 'rougeL': 0.17246288464026294, 'rougeLsum': 0.17246288464026294}\n\nbert= {'precision': 0.7629447244107723, 'recall': 0.6678285673260689, 'f1': 0.710793524980545}\n\nmover = 0.5670640524432061" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cstnews_data-xlsumm_results\"\n\nrouge= {'rouge1': 0.24238643847479127, 'rouge2': 0.09767172698855643, 'rougeL': 0.17246288464026294, 'rougeLsum': 0.17246288464026294}\n\nbert= {'precision': 0.7629447244107723, 'recall': 0.6678285673260689, 'f1': 0.710793524980545}\n\nmover = 0.5670640524432061" ]
[ 6, 138 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cstnews_data-xlsumm_results\"\n\nrouge= {'rouge1': 0.24238643847479127, 'rouge2': 0.09767172698855643, 'rougeL': 0.17246288464026294, 'rougeLsum': 0.17246288464026294}\n\nbert= {'precision': 0.7629447244107723, 'recall': 0.6678285673260689, 'f1': 0.710793524980545}\n\nmover = 0.5670640524432061" ]
c322122666527d2626a37ac0370a512f047c4c81
# Dataset Card for "cstnews_data-xlsumm_cstnews_results" rouge= {'rouge1': 0.5498543272343877, 'rouge2': 0.37081395676717904, 'rougeL': 0.4140737044829083, 'rougeLsum': 0.4140737044829083} bert= {'precision': 0.8062506914138794, 'recall': 0.8010864742100239, 'f1': 0.8028265088796616} mover = 0.6432560342837185
arthurmluz/cstnews_data-xlsum_cstnews_results
[ "region:us" ]
2023-11-03T01:11:41+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 56672, "num_examples": 16}], "download_size": 54739, "dataset_size": 56672}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-15T03:52:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cstnews_data-xlsumm_cstnews_results" rouge= {'rouge1': 0.5498543272343877, 'rouge2': 0.37081395676717904, 'rougeL': 0.4140737044829083, 'rougeLsum': 0.4140737044829083} bert= {'precision': 0.8062506914138794, 'recall': 0.8010864742100239, 'f1': 0.8028265088796616} mover = 0.6432560342837185
[ "# Dataset Card for \"cstnews_data-xlsumm_cstnews_results\"\n\nrouge= {'rouge1': 0.5498543272343877, 'rouge2': 0.37081395676717904, 'rougeL': 0.4140737044829083, 'rougeLsum': 0.4140737044829083}\n\nbert= {'precision': 0.8062506914138794, 'recall': 0.8010864742100239, 'f1': 0.8028265088796616}\n\nmover = 0.6432560342837185" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cstnews_data-xlsumm_cstnews_results\"\n\nrouge= {'rouge1': 0.5498543272343877, 'rouge2': 0.37081395676717904, 'rougeL': 0.4140737044829083, 'rougeLsum': 0.4140737044829083}\n\nbert= {'precision': 0.8062506914138794, 'recall': 0.8010864742100239, 'f1': 0.8028265088796616}\n\nmover = 0.6432560342837185" ]
[ 6, 142 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cstnews_data-xlsumm_cstnews_results\"\n\nrouge= {'rouge1': 0.5498543272343877, 'rouge2': 0.37081395676717904, 'rougeL': 0.4140737044829083, 'rougeLsum': 0.4140737044829083}\n\nbert= {'precision': 0.8062506914138794, 'recall': 0.8010864742100239, 'f1': 0.8028265088796616}\n\nmover = 0.6432560342837185" ]