sha | text | id | tags | created_at | metadata | last_modified | arxiv | languages | tags_str | text_str | text_lists | processed_texts | tokens_length | input_texts
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
da25db7bcd20115ca1f496c120ecb952230b3b01 | # Dataset Card for "training_v2-public"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | male-2/training_v2-public | [
"region:us"
]
| 2023-11-09T14:55:23+00:00 | {"dataset_info": {"features": [{"name": "conversation", "dtype": "string"}, {"name": "type", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1091, "num_examples": 1}], "download_size": 8505, "dataset_size": 1091}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-09T14:55:26+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "training_v2-public"
More Information needed | [
"# Dataset Card for \"training_v2-public\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"training_v2-public\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"training_v2-public\"\n\nMore Information needed"
]
|
21202a287a5714ced13d8be3cf24da7e529fd670 | # Dataset Card for "turkishReviews-ds-mini"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | imelike/turkishReviews-ds-mini | [
"region:us"
]
| 2023-11-09T14:56:22+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "review", "dtype": "string"}, {"name": "review_length", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1252876.2642514652, "num_examples": 3378}, {"name": "validation", "num_bytes": 139455.7357485349, "num_examples": 376}], "download_size": 896651, "dataset_size": 1392332.0}} | 2023-11-18T18:45:50+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "turkishReviews-ds-mini"
More Information needed | [
"# Dataset Card for \"turkishReviews-ds-mini\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"turkishReviews-ds-mini\"\n\nMore Information needed"
]
| [
6,
19
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"turkishReviews-ds-mini\"\n\nMore Information needed"
]
|
4ca3b142af2e7bef66e3d2c07a3795f10786339d | # Dataset Card for "bw_spec_cls_80_27"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | arieg/bw_spec_cls_80_27 | [
"region:us"
]
| 2023-11-09T14:56:58+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "63804", "1": "63805", "2": "63874", "3": "63900", "4": "63908", "5": "63922", "6": "63936", "7": "63999", "8": "64005", "9": "64006", "10": "64007", "11": "64008", "12": "64009", "13": "64035", "14": "64078", "15": "64091", "16": "64093", "17": "64247", "18": "64248", "19": "64249", "20": "64252", "21": "64253", "22": "64331", "23": "64332", "24": "64333", "25": "64334", "26": "64338", "27": "64364", "28": "64365", "29": "64366", "30": "64407", "31": "64409", "32": "64410", "33": "64535", "34": "64536", "35": "64537", "36": "64538", "37": "64542", "38": "64553", "39": "64556", "40": "64567", "41": "64594", "42": "64601", "43": "64604", "44": "64659", "45": "64787", "46": "64788", "47": "64789", "48": "64796", "49": "64809", "50": "64834", "51": "64840", "52": "64841", "53": "64854", "54": "64855", "55": "64856", "56": "64857", "57": "64858", "58": "64859", "59": "64860", "60": "64861", "61": "64862", "62": "64863", "63": "64864", "64": "64865", "65": "64866", "66": "64893", "67": "64895", "68": "64896", "69": "64918", "70": "64919", "71": "64988", "72": "64989", "73": "64990", "74": "64991", "75": "64992", "76": "64993", "77": "64994", "78": "64995", "79": "65063"}}}}], "splits": [{"name": "train", "num_bytes": 88333251.2, "num_examples": 1600}, {"name": "test", "num_bytes": 22046259.0, "num_examples": 400}], "download_size": 110321369, "dataset_size": 110379510.2}} | 2023-11-09T14:57:14+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "bw_spec_cls_80_27"
More Information needed | [
"# Dataset Card for \"bw_spec_cls_80_27\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"bw_spec_cls_80_27\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"bw_spec_cls_80_27\"\n\nMore Information needed"
]
|
226ac20c686006e65fd3b877f848cc18481d5a81 | # Dataset Card for "qa-with-answer"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | Back-up/qa-with-answer | [
"region:us"
]
| 2023-11-09T14:59:58+00:00 | {"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answers", "struct": [{"name": "answer_start", "sequence": "int64"}, {"name": "text", "sequence": "string"}]}, {"name": "is_impossible", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 24536185.07080859, "num_examples": 19240}], "download_size": 4197812, "dataset_size": 24536185.07080859}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-09T15:00:00+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "qa-with-answer"
More Information needed | [
"# Dataset Card for \"qa-with-answer\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"qa-with-answer\"\n\nMore Information needed"
]
| [
6,
16
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"qa-with-answer\"\n\nMore Information needed"
]
|
779d2b4754710f510c2509a44a99c4a843513612 | # Dataset Card for "qa-no-answer"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | Back-up/qa-no-answer | [
"region:us"
]
| 2023-11-09T15:00:01+00:00 | {"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answers", "struct": [{"name": "answer_start", "sequence": "int64"}, {"name": "text", "sequence": "string"}]}, {"name": "is_impossible", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 11754158.929191412, "num_examples": 9217}], "download_size": 2677376, "dataset_size": 11754158.929191412}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-09T15:00:03+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "qa-no-answer"
More Information needed | [
"# Dataset Card for \"qa-no-answer\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"qa-no-answer\"\n\nMore Information needed"
]
| [
6,
16
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"qa-no-answer\"\n\nMore Information needed"
]
|
7fd023d8eb3047a059672af8c93bf3abb0c76cab | # Dataset Card for "dataset_2000_decompese_question_0"
The dataset has the following structure:
```json
{
"complex_question": "Does Mercury help detect coronavirus?",
"entities": ["Mercury", "coronavirus"],
"triples": [
{
"question": "What is the name of the coronavirus?",
"evidence": "str...",
"answer": "The coronavirus is called COVID-19"
},
{
"question": "Does Mercury help detect COVID-19?",
"evidence": [
"",
"",
""
],
"answer": "Mercury does not help detect COVID-19"
},
{
"question": "What is mercury used to detect?",
"evidence": "str...",
"answer": "Mercury is used to detect the temperature of things"
},
{
"question": "What are some symtoms of coronavirus?",
"evidence": "str...",
"answer": "Common symtoms of coronavirus are fever..."
}
],
"answer": "Yes, ..."
}
```
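A minimal loading sketch (assuming the standard Hugging Face `datasets` API and the column names listed in the repository metadata, where the nested list is stored as `triplets` rather than `triples` and retains only the `question` field):
```python
from datasets import load_dataset

# Load the train split of this repository (id taken from this card).
dataset = load_dataset("presencesw/dataset_2000_decompese_question_0", split="train")

# Walk through a few decomposed questions.
for example in dataset.select(range(3)):
    print("Complex question:", example["complex_question"])
    for triple in example["triplets"]:
        print("  Sub-question:", triple["question"])
    print("Final answer:", example["answer"])
```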
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | presencesw/dataset_2000_decompese_question_0 | [
"region:us"
]
| 2023-11-09T15:03:53+00:00 | {"dataset_info": {"features": [{"name": "entities", "sequence": "null"}, {"name": "triplets", "list": [{"name": "question", "dtype": "string"}]}, {"name": "answer", "dtype": "string"}, {"name": "complex_question", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 70060, "num_examples": 199}], "download_size": 26888, "dataset_size": 70060}} | 2023-11-09T15:42:01+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "dataset_2000_decompese_question_0"
The dataset has a nested structure.
More Information needed | [
"# Dataset Card for \"dataset_2000_decompese_question_0\"\nThe dataset has struct\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"dataset_2000_decompese_question_0\"\nThe dataset has struct\n\nMore Information needed"
]
| [
6,
29
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"dataset_2000_decompese_question_0\"\nThe dataset has struct\n\nMore Information needed"
]
|
8885372fe73256f96bb60f65b550538ac5c26047 |
# Dataset Card for "tomaarsen/setfit-absa-semeval-restaurants"
### Dataset Summary
This dataset contains the manually annotated restaurant reviews from SemEval-2014 Task 4, in the format as
understood by [SetFit](https://github.com/huggingface/setfit) ABSA.
For more details, see https://aclanthology.org/S14-2004/
### Data Instances
An example of "train" looks as follows.
```json
{"text": "But the staff was so horrible to us.", "span": "staff", "label": "negative", "ordinal": 0}
{"text": "To be completely fair, the only redeeming factor was the food, which was above average, but couldn't make up for all the other deficiencies of Teodora.", "span": "food", "label": "positive", "ordinal": 0}
{"text": "The food is uniformly exceptional, with a very capable kitchen which will proudly whip up whatever you feel like eating, whether it's on the menu or not.", "span": "food", "label": "positive", "ordinal": 0}
{"text": "The food is uniformly exceptional, with a very capable kitchen which will proudly whip up whatever you feel like eating, whether it's on the menu or not.", "span": "kitchen", "label": "positive", "ordinal": 0}
{"text": "The food is uniformly exceptional, with a very capable kitchen which will proudly whip up whatever you feel like eating, whether it's on the menu or not.", "span": "menu", "label": "neutral", "ordinal": 0}
```
### Data Fields
The data fields are the same among all splits.
- `text`: a `string` feature.
- `span`: a `string` feature showing the aspect span from the text.
- `label`: a `string` feature showing the polarity of the aspect span.
- `ordinal`: an `int64` feature showing the n-th occurrence of the span in the text. This is useful when the span occurs within the same text multiple times.
### Data Splits
| name |train|test|
|---------|----:|---:|
|tomaarsen/setfit-absa-semeval-restaurants|3693|1134|
### Training ABSA models using SetFit ABSA
To train using this dataset, first install the SetFit library:
```bash
pip install setfit
```
And then you can use the following script as a guideline of how to train an ABSA model on this dataset:
```python
from setfit import AbsaModel, AbsaTrainer, TrainingArguments
from datasets import load_dataset
from transformers import EarlyStoppingCallback
# You can initialize an AbsaModel using one or two SentenceTransformer models, or two ABSA models
model = AbsaModel.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")
# The training/eval dataset must have `text`, `span`, `label` (the polarity), and `ordinal` columns
dataset = load_dataset("tomaarsen/setfit-absa-semeval-restaurants")
train_dataset = dataset["train"]
eval_dataset = dataset["test"]
args = TrainingArguments(
output_dir="models",
use_amp=True,
batch_size=256,
eval_steps=50,
save_steps=50,
load_best_model_at_end=True,
)
trainer = AbsaTrainer(
model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
callbacks=[EarlyStoppingCallback(early_stopping_patience=5)],
)
trainer.train()
metrics = trainer.evaluate(eval_dataset)
print(metrics)
trainer.push_to_hub("tomaarsen/setfit-absa-restaurants")
```
You can then run inference like so:
```python
from setfit import AbsaModel
# Download from Hub and run inference
model = AbsaModel.from_pretrained(
"tomaarsen/setfit-absa-restaurants-aspect",
"tomaarsen/setfit-absa-restaurants-polarity",
)
# Run inference
preds = model([
"The best pizza outside of Italy and really tasty.",
"The food here is great but the service is terrible",
])
```
### Citation Information
```bibtex
@inproceedings{pontiki-etal-2014-semeval,
title = "{S}em{E}val-2014 Task 4: Aspect Based Sentiment Analysis",
author = "Pontiki, Maria and
Galanis, Dimitris and
Pavlopoulos, John and
Papageorgiou, Harris and
Androutsopoulos, Ion and
Manandhar, Suresh",
editor = "Nakov, Preslav and
Zesch, Torsten",
booktitle = "Proceedings of the 8th International Workshop on Semantic Evaluation ({S}em{E}val 2014)",
month = aug,
year = "2014",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/S14-2004",
doi = "10.3115/v1/S14-2004",
pages = "27--35",
}
```
| tomaarsen/setfit-absa-semeval-restaurants | [
"region:us"
]
| 2023-11-09T15:14:48+00:00 | {"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "span", "dtype": "string"}, {"name": "label", "dtype": "string"}, {"name": "ordinal", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 490223, "num_examples": 3693}, {"name": "test", "num_bytes": 138187, "num_examples": 1134}], "download_size": 193352, "dataset_size": 628410}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]} | 2023-11-16T10:37:20+00:00 | []
| []
| TAGS
#region-us
| Dataset Card for "tomaarsen/setfit-absa-semeval-restaurants"
============================================================
### Dataset Summary
This dataset contains the manually annotated restaurant reviews from SemEval-2014 Task 4, in the format as
understood by SetFit ABSA.
For more details, see URL
### Data Instances
An example of "train" looks as follows.
### Data Fields
The data fields are the same among all splits.
* 'text': a 'string' feature.
* 'span': a 'string' feature showing the aspect span from the text.
* 'label': a 'string' feature showing the polarity of the aspect span.
* 'ordinal': an 'int64' feature showing the n-th occurrence of the span in the text. This is useful when the span occurs within the same text multiple times.
### Data Splits
### Training ABSA models using SetFit ABSA
To train using this dataset, first install the SetFit library:
And then you can use the following script as a guideline of how to train an ABSA model on this dataset:
You can then run inference like so:
| [
"### Dataset Summary\n\n\nThis dataset contains the manually annotated restaurant reviews from SemEval-2014 Task 4, in the format as\nunderstood by SetFit ABSA.\n\n\nFor more details, see URL",
"### Data Instances\n\n\nAn example of \"train\" looks as follows.",
"### Data Fields\n\n\nThe data fields are the same among all splits.\n\n\n* 'text': a 'string' feature.\n* 'span': a 'string' feature showing the aspect span from the text.\n* 'label': a 'string' feature showing the polarity of the aspect span.\n* 'ordinal': an 'int64' feature showing the n-th occurrence of the span in the text. This is useful for if the span occurs within the same text multiple times.",
"### Data Splits",
"### Training ABSA models using SetFit ABSA\n\n\nTo train using this dataset, first install the SetFit library:\n\n\nAnd then you can use the following script as a guideline of how to train an ABSA model on this dataset:\n\n\nYou can then run inference like so:"
]
| [
"TAGS\n#region-us \n",
"### Dataset Summary\n\n\nThis dataset contains the manually annotated restaurant reviews from SemEval-2014 Task 4, in the format as\nunderstood by SetFit ABSA.\n\n\nFor more details, see URL",
"### Data Instances\n\n\nAn example of \"train\" looks as follows.",
"### Data Fields\n\n\nThe data fields are the same among all splits.\n\n\n* 'text': a 'string' feature.\n* 'span': a 'string' feature showing the aspect span from the text.\n* 'label': a 'string' feature showing the polarity of the aspect span.\n* 'ordinal': an 'int64' feature showing the n-th occurrence of the span in the text. This is useful for if the span occurs within the same text multiple times.",
"### Data Splits",
"### Training ABSA models using SetFit ABSA\n\n\nTo train using this dataset, first install the SetFit library:\n\n\nAnd then you can use the following script as a guideline of how to train an ABSA model on this dataset:\n\n\nYou can then run inference like so:"
]
| [
6,
45,
18,
109,
5,
60
]
| [
"passage: TAGS\n#region-us \n### Dataset Summary\n\n\nThis dataset contains the manually annotated restaurant reviews from SemEval-2014 Task 4, in the format as\nunderstood by SetFit ABSA.\n\n\nFor more details, see URL### Data Instances\n\n\nAn example of \"train\" looks as follows.### Data Fields\n\n\nThe data fields are the same among all splits.\n\n\n* 'text': a 'string' feature.\n* 'span': a 'string' feature showing the aspect span from the text.\n* 'label': a 'string' feature showing the polarity of the aspect span.\n* 'ordinal': an 'int64' feature showing the n-th occurrence of the span in the text. This is useful for if the span occurs within the same text multiple times.### Data Splits### Training ABSA models using SetFit ABSA\n\n\nTo train using this dataset, first install the SetFit library:\n\n\nAnd then you can use the following script as a guideline of how to train an ABSA model on this dataset:\n\n\nYou can then run inference like so:"
]
|
4d69e3de15751c40ac3829ccd31c3fa190f2068f |
# Dataset Card for "tomaarsen/setfit-absa-semeval-laptops"
### Dataset Summary
This dataset contains the manually annotated laptop reviews from SemEval-2014 Task 4, in the format as
understood by [SetFit](https://github.com/huggingface/setfit) ABSA.
For more details, see https://aclanthology.org/S14-2004/
### Data Instances
An example of "train" looks as follows.
```json
{"text": "I charge it at night and skip taking the cord with me because of the good battery life.", "span": "cord", "label": "neutral", "ordinal": 0}
{"text": "I charge it at night and skip taking the cord with me because of the good battery life.", "span": "battery life", "label": "positive", "ordinal": 0}
{"text": "The tech guy then said the service center does not do 1-to-1 exchange and I have to direct my concern to the \"sales\" team, which is the retail shop which I bought my netbook from.", "span": "service center", "label": "negative", "ordinal": 0}
{"text": "The tech guy then said the service center does not do 1-to-1 exchange and I have to direct my concern to the \"sales\" team, which is the retail shop which I bought my netbook from.", "span": "\"sales\" team", "label": "negative", "ordinal": 0}
{"text": "The tech guy then said the service center does not do 1-to-1 exchange and I have to direct my concern to the \"sales\" team, which is the retail shop which I bought my netbook from.", "span": "tech guy", "label": "neutral", "ordinal": 0}
```
### Data Fields
The data fields are the same among all splits.
- `text`: a `string` feature.
- `span`: a `string` feature showing the aspect span from the text.
- `label`: a `string` feature showing the polarity of the aspect span.
- `ordinal`: an `int64` feature showing the n-th occurrence of the span in the text. This is useful when the span occurs within the same text multiple times.
### Data Splits
| name |train|test|
|---------|----:|---:|
|tomaarsen/setfit-absa-semeval-laptops|2358|654|
### Training ABSA models using SetFit ABSA
To train using this dataset, first install the SetFit library:
```bash
pip install setfit
```
And then you can use the following script as a guideline of how to train an ABSA model on this dataset:
```python
from setfit import AbsaModel, AbsaTrainer, TrainingArguments
from datasets import load_dataset
from transformers import EarlyStoppingCallback
# You can initialize an AbsaModel using one or two SentenceTransformer models, or two ABSA models
model = AbsaModel.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")
# The training/eval dataset must have `text`, `span`, `label` (the polarity), and `ordinal` columns
dataset = load_dataset("tomaarsen/setfit-absa-semeval-laptops")
train_dataset = dataset["train"]
eval_dataset = dataset["test"]
args = TrainingArguments(
output_dir="models",
use_amp=True,
batch_size=256,
eval_steps=50,
save_steps=50,
load_best_model_at_end=True,
)
trainer = AbsaTrainer(
model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
callbacks=[EarlyStoppingCallback(early_stopping_patience=5)],
)
trainer.train()
metrics = trainer.evaluate(eval_dataset)
print(metrics)
trainer.push_to_hub("tomaarsen/setfit-absa-laptops")
```
You can then run inference like so:
```python
from setfit import AbsaModel
# Download from Hub and run inference
model = AbsaModel.from_pretrained(
"tomaarsen/setfit-absa-laptops-aspect",
"tomaarsen/setfit-absa-laptops-polarity",
)
# Run inference
preds = model([
"Boots up fast and runs great!",
"The screen shows great colors.",
])
```
### Citation Information
```bibtex
@inproceedings{pontiki-etal-2014-semeval,
title = "{S}em{E}val-2014 Task 4: Aspect Based Sentiment Analysis",
author = "Pontiki, Maria and
Galanis, Dimitris and
Pavlopoulos, John and
Papageorgiou, Harris and
Androutsopoulos, Ion and
Manandhar, Suresh",
editor = "Nakov, Preslav and
Zesch, Torsten",
booktitle = "Proceedings of the 8th International Workshop on Semantic Evaluation ({S}em{E}val 2014)",
month = aug,
year = "2014",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/S14-2004",
doi = "10.3115/v1/S14-2004",
pages = "27--35",
}
``` | tomaarsen/setfit-absa-semeval-laptops | [
"region:us"
]
| 2023-11-09T15:14:52+00:00 | {"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "span", "dtype": "string"}, {"name": "label", "dtype": "string"}, {"name": "ordinal", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 335243, "num_examples": 2358}, {"name": "test", "num_bytes": 76698, "num_examples": 654}], "download_size": 146971, "dataset_size": 411941}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]} | 2023-11-16T10:38:19+00:00 | []
| []
| TAGS
#region-us
| Dataset Card for "tomaarsen/setfit-absa-semeval-laptops"
========================================================
### Dataset Summary
This dataset contains the manually annotated laptop reviews from SemEval-2014 Task 4, in the format as
understood by SetFit ABSA.
For more details, see URL
### Data Instances
An example of "train" looks as follows.
### Data Fields
The data fields are the same among all splits.
* 'text': a 'string' feature.
* 'span': a 'string' feature showing the aspect span from the text.
* 'label': a 'string' feature showing the polarity of the aspect span.
* 'ordinal': an 'int64' feature showing the n-th occurrence of the span in the text. This is useful when the span occurs within the same text multiple times.
### Data Splits
### Training ABSA models using SetFit ABSA
To train using this dataset, first install the SetFit library:
And then you can use the following script as a guideline of how to train an ABSA model on this dataset:
You can then run inference like so:
| [
"### Dataset Summary\n\n\nThis dataset contains the manually annotated laptop reviews from SemEval-2014 Task 4, in the format as\nunderstood by SetFit ABSA.\n\n\nFor more details, see URL",
"### Data Instances\n\n\nAn example of \"train\" looks as follows.",
"### Data Fields\n\n\nThe data fields are the same among all splits.\n\n\n* 'text': a 'string' feature.\n* 'span': a 'string' feature showing the aspect span from the text.\n* 'label': a 'string' feature showing the polarity of the aspect span.\n* 'ordinal': an 'int64' feature showing the n-th occurrence of the span in the text. This is useful for if the span occurs within the same text multiple times.",
"### Data Splits",
"### Training ABSA models using SetFit ABSA\n\n\nTo train using this dataset, first install the SetFit library:\n\n\nAnd then you can use the following script as a guideline of how to train an ABSA model on this dataset:\n\n\nYou can then run inference like so:"
]
| [
"TAGS\n#region-us \n",
"### Dataset Summary\n\n\nThis dataset contains the manually annotated laptop reviews from SemEval-2014 Task 4, in the format as\nunderstood by SetFit ABSA.\n\n\nFor more details, see URL",
"### Data Instances\n\n\nAn example of \"train\" looks as follows.",
"### Data Fields\n\n\nThe data fields are the same among all splits.\n\n\n* 'text': a 'string' feature.\n* 'span': a 'string' feature showing the aspect span from the text.\n* 'label': a 'string' feature showing the polarity of the aspect span.\n* 'ordinal': an 'int64' feature showing the n-th occurrence of the span in the text. This is useful for if the span occurs within the same text multiple times.",
"### Data Splits",
"### Training ABSA models using SetFit ABSA\n\n\nTo train using this dataset, first install the SetFit library:\n\n\nAnd then you can use the following script as a guideline of how to train an ABSA model on this dataset:\n\n\nYou can then run inference like so:"
]
| [
6,
45,
18,
109,
5,
60
]
| [
"passage: TAGS\n#region-us \n### Dataset Summary\n\n\nThis dataset contains the manually annotated laptop reviews from SemEval-2014 Task 4, in the format as\nunderstood by SetFit ABSA.\n\n\nFor more details, see URL### Data Instances\n\n\nAn example of \"train\" looks as follows.### Data Fields\n\n\nThe data fields are the same among all splits.\n\n\n* 'text': a 'string' feature.\n* 'span': a 'string' feature showing the aspect span from the text.\n* 'label': a 'string' feature showing the polarity of the aspect span.\n* 'ordinal': an 'int64' feature showing the n-th occurrence of the span in the text. This is useful for if the span occurs within the same text multiple times.### Data Splits### Training ABSA models using SetFit ABSA\n\n\nTo train using this dataset, first install the SetFit library:\n\n\nAnd then you can use the following script as a guideline of how to train an ABSA model on this dataset:\n\n\nYou can then run inference like so:"
]
|
d75d0be34f98053d6ccd5976f2b5cd48b1b27d21 | # Dataset Card for "synpre_extract_q10_a5_1M"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | tyzhu/synpre_extract_q10_a5_1M | [
"region:us"
]
| 2023-11-09T15:18:53+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}, {"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "validation", "num_bytes": 9241485, "num_examples": 9777}, {"name": "train", "num_bytes": 925947541, "num_examples": 976352}], "download_size": 545422427, "dataset_size": 935189026}} | 2023-11-09T15:20:27+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "synpre_extract_q10_a5_1M"
More Information needed | [
"# Dataset Card for \"synpre_extract_q10_a5_1M\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"synpre_extract_q10_a5_1M\"\n\nMore Information needed"
]
| [
6,
23
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"synpre_extract_q10_a5_1M\"\n\nMore Information needed"
]
|
2636357ebc4fdf6ae22932d4c1a47b208f62a8a5 | # Dataset Card for "bw_spec_cls_80_28"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | arieg/bw_spec_cls_80_28 | [
"region:us"
]
| 2023-11-09T15:23:31+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "65064", "1": "65073", "2": "65076", "3": "65077", "4": "65090", "5": "65234", "6": "65488", "7": "65619", "8": "65685", "9": "65752", "10": "65755", "11": "65756", "12": "65893", "13": "66058", "14": "66073", "15": "66074", "16": "66075", "17": "66076", "18": "66180", "19": "66187", "20": "66390", "21": "66405", "22": "66469", "23": "66482", "24": "66483", "25": "66525", "26": "66636", "27": "66637", "28": "66638", "29": "66641", "30": "66643", "31": "66644", "32": "66646", "33": "66648", "34": "66649", "35": "66650", "36": "66757", "37": "67007", "38": "67010", "39": "67011", "40": "67016", "41": "67017", "42": "67121", "43": "67163", "44": "67232", "45": "67233", "46": "67235", "47": "67308", "48": "67357", "49": "67358", "50": "67359", "51": "67360", "52": "67361", "53": "67362", "54": "67363", "55": "67366", "56": "67367", "57": "67368", "58": "67412", "59": "67470", "60": "67500", "61": "67553", "62": "67556", "63": "67557", "64": "67558", "65": "67597", "66": "67598", "67": "67600", "68": "67637", "69": "67639", "70": "67640", "71": "67660", "72": "67661", "73": "67673", "74": "67707", "75": "67760", "76": "67763", "77": "67764", "78": "67765", "79": "67766"}}}}], "splits": [{"name": "train", "num_bytes": 87471356.8, "num_examples": 1600}, {"name": "test", "num_bytes": 21888454.0, "num_examples": 400}], "download_size": 109587336, "dataset_size": 109359810.8}} | 2023-11-09T15:23:48+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "bw_spec_cls_80_28"
More Information needed | [
"# Dataset Card for \"bw_spec_cls_80_28\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"bw_spec_cls_80_28\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"bw_spec_cls_80_28\"\n\nMore Information needed"
]
|
4251b0bd4acc5faf8c32fb1d444d3c867992d995 |
This dataset comprises prompt/answer pairs related to the curriculum for Norwegian dentistry and dental hygiene students, specifically focusing on the subjects of radiation physics, radiation protection, and radiological technology.
This dataset is a replica of https://huggingface.co/datasets/geraldOslo/prompt_answer_dataset_for_norwegian_radiation_protection_in_dentistry, in Alpaca format.
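For orientation, a record in the common Alpaca format is an instruction/input/output triple. The sketch below is illustrative only: the field contents are placeholders, and the exact column names in this repository's CSV are not confirmed by this card (the `tab` config and `;` separator come from the repository metadata).
```python
from datasets import load_dataset

# Hypothetical shape of one Alpaca-format record; field names follow the common
# Alpaca convention (instruction / input / output) and the contents are placeholders.
example_record = {
    "instruction": "<question about radiation protection in dentistry>",
    "input": "",  # often empty when the instruction is self-contained
    "output": "<the corresponding answer drawn from the curriculum material>",
}

# The repository metadata defines a "tab" config backed by a ';'-separated CSV.
dataset = load_dataset("NbAiLab/torgersen-alpaca", "tab")
```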
| NbAiLab/torgersen-alpaca | [
"size_categories:1K<n<10K",
"language:no",
"license:cc-by-2.0",
"dentistry",
"physics",
"radiation protection",
"region:us"
]
| 2023-11-09T15:33:43+00:00 | {"language": ["no"], "license": "cc-by-2.0", "size_categories": ["1K<n<10K"], "pretty_name": "Question/answer connected to radiation protection in dentistry", "tags": ["dentistry", "physics", "radiation protection"], "configs": [{"config_name": "tab", "data_files": "question_answer_pairs_radiation_protection.csv", "sep": ";"}]} | 2023-11-09T15:48:55+00:00 | []
| [
"no"
]
| TAGS
#size_categories-1K<n<10K #language-Norwegian #license-cc-by-2.0 #dentistry #physics #radiation protection #region-us
|
This dataset comprises prompt/answer pairs related to the curriculum for Norwegian dentistry and dental hygiene students, specifically focusing on the subjects of radiation physics, radiation protection, and radiological technology.
This dataset is a replica of URL in Alpaca format.
| []
| [
"TAGS\n#size_categories-1K<n<10K #language-Norwegian #license-cc-by-2.0 #dentistry #physics #radiation protection #region-us \n"
]
| [
45
]
| [
"passage: TAGS\n#size_categories-1K<n<10K #language-Norwegian #license-cc-by-2.0 #dentistry #physics #radiation protection #region-us \n"
]
|
0590c4b1bafd3187cc99a3553069473bf0a2d891 |
# LIMA ERP data (LimaRP)
Following the principles highlighted in arXiv:2305.11206 by Zhou et al.
and replicated in some aspects by Kaiokendev with [SuperHOT](https://kaiokendev.github.io/til#lima-works),
the archive in this repository contains about **2000** manually selected and curated 1-on-1 human-human
roleplaying conversations and associated LLM-generated persona and scenario data. The RP conversations
all feature only two human participants, although occasionally the participants may play the role of more
than one character.
The conversation data is in the form of source files in .yaml format + a basic Python script for building the
dataset, which is intended to be used for finetuning in "completion" format (similar to unsupervised finetuning).
Having reached the minimum number of examples suggested in the LIMA paper, and after probably more than
500 hours of work spent manually gathering and curating the data, LimaRP can be considered a finished project
at this point in time. Future work (cleaning, trimming, expansion) would require more resources and community help.
### Notes
- **Be aware that although retrieved solely from age-restricted (18+) internet forums, the data contains
roleplaying elements and topics that may be considered extreme, distasteful, shocking,
inappropriate and disturbing. *Do not* download it if you're not sure of the legal ramifications of
possessing fictional _written_ content of _any_ kind in your country.**
- The first ~500 samples were designed to be trained with a 2048 tokens context size; the following 500 with
a 4096 tokens context size or greater. The later training samples (`data-long`) were designed for an 8192
tokens context size. Note that while the 8k samples can be reduced to 4k size, this can confuse the model to
some extent, as scenario and persona data may end up referring to events removed from the context.
- Please note that **no support will be provided for the dataset and building script here on HuggingFace.**
- A persona–scenario summarizer/generator made with LimaRP data [also exists](https://huggingface.co/lemonilia/LimaRP-perscengen-v5).
- 🔑 The archive is password-protected. The password is `LimaRP`
## Known issues
LimaRP has a few notable issues, here in subjective decreasing order of severity.
- **Grammar and typos**. Although care has been taken to reduce the number of typos and grammatical errors,
they are still present to some extent (punctuation issues in particular). Automated AI-based grammar checking
with language models like [CoEdit](https://huggingface.co/grammarly/coedit-xl) could be performed, but the
results would then have to be manually validated since these models often tend to correct more than necessary,
which can be undesirable in dialogues, as well as being avoidable manual work. Some data sources (threads)
show a larger amount of grammatical issues than others, and for those this could be an acceptable tradeoff
if they're worth saving.
- **Dullness**. Overall the conversations may feel too polite or even dull in some aspects. This might be due to
various reasons, but the main one is probably that most are from generally moderately well-written
"vanilla" ERP where people try to be respectful with each other. More _noncon_ and/or _extreme_
content may be needed to reduce the general "politeness" of the conversational data and spice it up.
- **Compiling errors**. While the provided script performs a certain amount of validation checks,
there may still be instances where due to human error utterances have been assigned the wrong label,
or placeholder names been assigned to the wrong character. The former issue is more likely to have
happened in the first (4k-context) ~1000 training samples (`data-short`). The data needs to be carefully
checked to make sure that no issue in this regard exists.
- **Repetitive and inaccurate descriptions**. While conversations are almost entirely human-generated,
character information and scenario exhibit `gpt-4`-isms and can be repetitive, lack depth and miss certain character traits; manual
editing will be needed to make them more human-like and respond to more specialized personality
traits and keywords—as a result, LimaRP-generated text may appear to ignore certain character traits.
A more powerful personality summarizer capable of being both accurate while generating sufficiently
long descriptions could be conceived for solving this issue.
- **Lack of instructions**. No instruction data whatsoever is present in the dataset. While the initial plan
was only making it focused on conversations, in retrospect a minimal amount of instruction-oriented roleplay data
could be beneficial in making the dataset able to better stand up on its own feet, without the need
for merging the data with smarter models or mixing it with external instruction datasets.
- **Name biases**. Character names may need to be diversified to remove potentially undesirable bias. In other words,
certain names may have ended up getting associated with certain personalities since they have been used
more frequently than others.
- **Lack of diversity**. In general, more focus needs to be put on improving conversation diversity. The total
number of conversations may have been excessively padded up, as several long conversations that
couldn't fit within the 4k/8k tokens target have been split into multiple ones (on the other hand,
Persona and Scenario data was never reused).
- **Poor dataset building script**. The Python script for building the dataset, although working, is not great
quality-wise and not particularly efficient.
- **Possible sources of impersonation**. Several of the conversations in the 8k set feature participants consistently
playing the role of _two_ characters at the same time. Character names in these files (which include the suffix `_MULTI`
or `_GROUP` in the filename) have been assigned a name with the format `Char1&Char2`. Testing didn't reveal issues
with this, but it's something to keep in mind if more severe impersonation problems occur compared to the initial
release of LimaRP. Furthermore, in a few conversations additional characters (roleplayed by either of the two users)
may also temporarily participate in the story. These have often (but not always) been assigned a `_BAD` tag in the filename.
- **Gender confusion sources**. Some conversations feature "futanari" or "transgender" content. These have been found
to confuse small-scale models to a certain extent. All source files have a `content` field and in most cases they
contain keywords like `shemale`, `futa`, `futanari`, `trans`, `transgender` when relevant to assist filtering.
## Conversation data form
Only one format has been used: **forum/novel-style**. This includes:
- Quotation marks for dialogues;
- Narration in third person, simple past form, without delimiters;
Other RP styles have been excluded, and messages showing them have been fixed when possible and feasible.
### Format details
- Narration does not have any delimiter.
- `Jessica looked at Mark with disdain.`
- Dialogues are enclosed with ASCII double quotation marks.
- `"I say this."`
- Onomatopoeias are enclosed with asterisks.
- `*thud*`
- Inner thoughts are enclosed with underscores.
- `_What is he doing?_`
- Non-dialogue quotes are enclosed with two apostrophes on each side (caveat: not all have been converted in this way).
- `''The Jungle Book''`
- Punctuation has been normalized. Fancy quotes have been converted to the ASCII equivalent, ellipses always
turned into a standard format (`...` with a trailing space when a word follows) and em-dashes always converted
to three consecutive dashes (`---`) without any surrounding space.
- For stylistic reasons, when building the dataset em-dash surrogates get converted to their UTF-8 symbol (`—`).
- Placeholder names have been used for the characters, even within the messages, whenever possible. `<FIRST>` is always
assumed to be the bot/model, and `<SECOND>` always assumed to be the human/user. All conversations terminate with
a message by `<FIRST>`.
- When building the dataset, placeholder names currently get converted to the ones actually used in
the RP conversations.
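As an illustration only, here is a minimal sketch of the conventions listed above; it is not the actual building script shipped with the archive:
```python
def build_message(text: str, first_name: str, second_name: str) -> str:
    """Sketch of the conversions described above: substitute the <FIRST>/<SECOND>
    placeholders with the roleplayed character names and convert the '---'
    em-dash surrogate to its UTF-8 symbol (no surrounding spaces)."""
    text = text.replace("<FIRST>", first_name).replace("<SECOND>", second_name)
    text = text.replace("---", "\u2014")
    return text

# Example: '<FIRST> hesitated---"I say this."' -> 'Jessica hesitated<em dash>"I say this."'
print(build_message('<FIRST> hesitated---"I say this."', "Jessica", "Mark"))
```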
## Data sources
Weights are naively calculated in terms of bytes for the entire conversation files as of 2023-11-10.
Source|Notes|Weight
-----|-----|-----:
All The Fallen|Registration required|5.1%
Black Dahlia Roleplaying|Registration required, 18+ characters only|0.9%
Blue Moon Roleplaying|Mostly open-access, Lolisho forbidden|18.4%
Darknest Fantasy|Registration required, 18+ characters only|0.2%
Eka's Portal|Open-access|1.6%
Elliquiy|Approval required, Lolisho forbidden|50.8%
Lolicit|Registration required, Defunct website|10.5%
Redlight Ponyville|Approval required|0.6%
The Inner Sanctum|Registration required, 18+ characters only|11.8%
Note that users are required to be 18+ to write in the listed ERP forums or forum subsections.
Usernames, OOC and other personal information have **not** been included in the training data, only the
names of the roleplayed characters as used in the conversations (or sometimes with minor changes).
## Some lessons learned while making LimaRP
- There is indeed no real need for a large amount of data to give models a strong bias towards producing
roleplaying text with good reliability in respecting user/bot turns. The 2000 training rows could even be
trimmed in number with likely similar results.
- Incidentally, roughly 2000 training examples are also about the limit that can still be finetuned in
reasonable amounts of time on a single 24GB consumer GPU at a decent context length (in the case of
13B models).
- Data quality beats quantity, but ensuring good quality is very difficult without the help of unrestricted
powerful LLMs and/or extensive human intervention.
- Remaining focused on a strict set of rules with a clear goal and consistently adding a relatively small
number of training examples on a daily basis were a driving force for completing the dataset.
- In retrospect, the RP conversation processing pipeline could have been improved and better planned, or made
more efficient.
- Finetuning the dataset in "completion" mode (on the entire chat) may have been part of the reasons why
it was so effective in altering model outputs, whether finetuned from scratch or applied as a LoRA.
- The initial GPT-3.5/4 "seed" data for generating Persona and Scenario was crucial in the initial stages
of the project, but once a dedicated summarizer was trained from that data, access to more powerful LLMs
became for the most part unnecessary.
- Compiling and verifying the data manually took much more work than anticipated. This should have been
a group effort.
- In the end, advances in base model performance, increases in context size (so far from 2k tokens of
the original LLaMA to 8k tokens or more of Mistral and other recent models) and improvements in ICL
(in-context learning) capabilities may eventually render finetuning on datasets like LimaRP unnecessary
except for zero-shot RP performance or cases where models have a strong built-in alignment preventing ERP.
---
# Appendix
## Additional points of improvement
Ideas in random order that could be applied for improving the dataset. Some have been already
mentioned earlier.
- Recheck old conversations and consolidate them if short
- Carefully watch for wrongly assigned character names and labels
- Rewrite scenario and personality to be more human-like and diverse
- Include the instruction at various locations in the conversation (e.g. at the end), or multiple times
- May be performed just for "Scenario" to make the model learn to change the direction of the conversation
- Classify message "safety" (sexual/non-sexual, etc)
- Perhaps using some external LLM and over a continuous range rather than just two discrete classifications
- Add more "Exaggerated" personalities
- Anime/Japanese character tropes, etc.
- Include other information during finetuning
- Inferred character motivations, psychology, notes, etc.
- Could also include summarized (word list) traits in "Personality", making Persona
a general "Description"
- These could go in the system instruction in a way to boost the model's reasoning capabilities.
- Chain-of-Thought-like indications in long-form descriptions:
"Since the character is/does X, he is Y", etc.
- Add focused RP-oriented small instruction dataset
- Low amount of turns on very specific instructions
- Greentext, logic, puzzles, etc.
- Truly randomized, gender-neutral names in the training data
- Could avoid name bias
- Could be implemented as an "Alternative" name field in the source files
- An easy alternative would be instead simply using standardized names like USER and CHAR—never tried
with the full dataset.
- Use message length hints (in `tokens/10`) when finetuning
- Could be more precise and easier to use than fuzzy lengths
- Include human training data from books in the form of "Questions and Answers" or interviews
- Make a greater use of what Markdown formatting offers, while maintaining the novel-style narration | lemonilia/LimaRP | [
"task_categories:conversational",
"task_categories:summarization",
"size_categories:1K<n<10K",
"language:en",
"license:apache-2.0",
"not-for-all-audiences",
"region:us"
]
| 2023-11-09T15:43:01+00:00 | {"language": ["en"], "license": "apache-2.0", "size_categories": ["1K<n<10K"], "task_categories": ["conversational", "summarization"], "pretty_name": "LimaRP", "tags": ["not-for-all-audiences"]} | 2023-11-11T19:00:51+00:00 | []
| [
"en"
]
| TAGS
#task_categories-conversational #task_categories-summarization #size_categories-1K<n<10K #language-English #license-apache-2.0 #not-for-all-audiences #region-us
| LIMA ERP data (LimaRP)
======================
Following the principles highlighted in arXiv:2305.11206 by Zhou et al.
and replicated in some aspects by Kaiokendev with SuperHOT,
the archive in this repository contains about 2000 manually selected and curated 1-on-1 human-human
roleplaying conversations and associated LLM-generated persona and scenario data. The RP conversations
all feature only two human participants, although occasionally the participants may play the role of more
than one character.
The conversation data is in the form of source files in .yaml format + basic Python script for building the
dataset, intended to be finetuned in "completion" format (similar to unsupervised finetuning).
Having reached the minimum number of examples suggested in the LIMA paper and after putting overall probably more
than 500 hours of work on manually gathering and curating the data, LimaRP can be considered a finished project
at this point in time. Future work (cleaning, trimming, expansion) would require more resources and community help.
### Notes
* Be aware that although retrieved solely from age-restricted (18+) internet forums, the data contains
roleplaying elements and topics that may be considered extreme, distasteful, shocking,
inappropriate and disturbing. *Do not* download it if you're not sure of the legal ramifications of
possessing fictional *written* content of *any* kind in your country.
* The first ~500 samples were designed to be trained with a 2048 tokens context size; the following 500 with
a 4096 tokens context size or greater. The later training samples ('data-long') were designed for an 8192
tokens context size. Note that while the 8k samples can be reduced to 4k size, this can confuse the model to
some extent, as scenario and persona data may end up referring to events removed from the context.
* Please note that no support will be provided for the dataset and building script here on HuggingFace.
* A persona–scenario summarizer/generator made with LimaRP data also exists.
* The archive is password-protected. The password is 'LimaRP'
Known issues
------------
LimaRP has a few notable issues, here in subjective decreasing order of severity.
* Grammar and typos. Although care has been put to reduce the amount of typos and grammatical errors,
they are still present to some extent (punctuation issues in particular). Automated AI-based grammar checking
with language models like CoEdit could be performed, but the
results would then have to be manually validated since these models often tend to correct more than necessary,
which can be undesirable in dialogues, as well as being avoidable manual work. Some data sources (threads)
show a larger amount of grammatical issues than others, and for those this could be an acceptable tradeoff
if they're worth saving.
* Dullness. Overall the conversations may feel too polite or even dull in some aspects. This might be due to
various reasons, but the main one is probably that most are from generally moderately well-written
"vanilla" ERP where people try to be respectful with each other. More *noncon* and/or *extreme*
content may be needed to reduce the general "politeness" of the conversational data and spice it up.
* Compiling errors. While the provided script performs a certain amount of validation checks,
there may still be instances where due to human error utterances have been assigned the wrong label,
or placeholder names been assigned to the wrong character. The former issue is more likely to have
happened in the first (4k-context) ~1000 training samples ('data-short'). The data needs to be carefully
checked to make sure that no issue in this regard exists.
* Repetitive and inaccurate descriptions. While conversations are almost entirely human-generated,
character information and scenario exhibit 'gpt-4'-isms and can be repetitive, lack depth and miss certain character traits; manual
editing will be needed to make them more human-like and respond to more specialized personality
traits and keywords—as a result, LimaRP-generated text may appear to ignore certain character traits.
A more powerful personality summarizer capable of being both accurate while generating sufficiently
long descriptions could be conceived for solving this issue.
* Lack of instructions. No instruction data whatsoever is present in the dataset. While the initial plan
was only making it focused on conversations, in retrospect a minimal amount of instruction-oriented roleplay data
could be beneficial in making the dataset able to better stand up on its own feet, without the need
for merging the data with smarter models or mixing it with external instruction datasets.
* Name biases. Character names may need to be diversified to remove potentially undesirable bias. In other words,
certain names may have ended up getting associated with certain personalities since they have been used
more frequently than others.
* Lack of diversity. In general, more focus needs to be put on improving conversation diversity. The total
number of conversations may have been excessively padded up, as several long conversations that
couldn't fit within the 4k/8k tokens target have been split into multiple ones (on the other hand,
Persona and Scenario data was never reused).
* Poor dataset building script. The Python script for building the dataset, although working, is not great
quality-wise and not particularly efficient.
* Possible sources of impersonation. Several of the conversations in the 8k set feature participants consistently
playing the role of *two* characters at the same time. Character names in these files (which include the suffix '\_MULTI'
or '\_GROUP' in the filename) have been assigned a name with the format 'Char1&Char2'. Testing didn't reveal issues
with this, but it's something to keep in mind if more severe impersonation problems occur compared to the initial
release of LimaRP. Furthermore, in a few conversations additional characters (roleplayed by either of the two users)
may also temporarily participate in the story. These have often (but not always) been assigned a '\_BAD' tag in the filename.
* Gender confusion sources. Some conversations feature "futanari" or "transgender" content. These have been found
to confuse small-scale models to a certain extent. All source files have a 'content' field and in most cases they
contain keywords like 'shemale', 'futa', 'futanari', 'trans', 'transgender' when relevant to assist filtering.
Conversation data form
----------------------
Only one format has been used: forum/novel-style. This includes:
* Quotation marks for dialogues;
* Narration in third person, simple past form, without delimiters;
Other RP styles have been excluded, and messages showing them have been fixed when possible and feasible.
### Format details
* Narration does not have any delimiter.
+ 'Jessica looked at Mark with disdain.'
* Dialogues are enclosed with ASCII double quotation marks.
+ '"I say this."'
* Onomatopoeias are enclosed with asterisks.
+ '*thud*'
* Inner thoughts are enclosed with underscores.
+ '_What is he doing?_'
* Non-dialogue quotes are enclosed with two apostrophes on each side (caveat: not all have been converted in this way).
+ '''The Jungle Book'''
* Punctuation has been normalized. Fancy quotes have been converted to the ASCII equivalent, ellipses always
turned into a standard format ('...' with a trailing space when a word follows) and em-dashes always converted
to three consecutive dashes ('---') without any surrounding space.
+ For stylistic reasons, when building the dataset em-dash surrogates get converted to their UTF-8 symbol ('—').
* Placeholder names have been used for the characters, even within the messages, whenever possible. '<FIRST>' is always
assumed to be the bot/model, and '<SECOND>' always assumed to be the human/user. All conversations terminate with
a message by '<FIRST>'.
+ When building the dataset, placeholder names currently get converted to the ones actually used in
the RP conversations.
Data sources
------------
Weights are naively calculated in terms of bytes for the entire conversation files as of 2023-11-10.
Note that users are required to be 18+ to write in the listed ERP forums or forum subsections.
Usernames, OOC and other personal information have not been included in the training data, only the
names of the roleplayed characters as used in the conversations (or sometimes with minor changes).
Some lessons learned while making LimaRP
----------------------------------------
* There is indeed no real need for a large amount of data to give models a strong bias towards producing
roleplaying text with good reliability in respecting user/bot turns. The 2000 training rows could even be
trimmed in number with likely similar results.
* Incidentally, roughly 2000 training examples are also about the limit that can still be finetuned in
reasonable amounts of time on a single 24GB consumer GPU at a decent context length (in the case of
13B models).
* Data quality beats quantity, but ensuring good quality is very difficult without the help of unrestricted
powerful LLMs and/or extensive human intervention.
* Remaining focused on a strict set of rules with a clear goal and consistently adding a relatively small
number of training examples on a daily basis were a driving force for completing the dataset.
* In retrospect, the RP conversation processing pipeline could have been improved and better planned, or made
more efficient.
* Finetuning the dataset in "completion" mode (on the entire chat) may have been part of the reasons why
it was so effective in altering model outputs, whether finetuned from scratch or applied as a LoRA.
* The initial GPT-3.5/4 "seed" data for generating Persona and Scenario was crucial in the initial stages
of the project, but once a dedicated summarizer was trained from that data, access to more powerful LLMs
became for the most part unnecessary.
* Compiling and verifying the data manually took much more work than anticipated. This should have been
a group effort.
* In the end, advances in base model performance, increases in context size (so far from 2k tokens of
the original LLaMA to 8k tokens or more of Mistral and other recent models) and improvements in ICL
(in-context learning) capabilities may eventually render finetuning on datasets like LimaRP unnecessary
except for zero-shot RP performance or cases where models have a strong built-in alignment preventing ERP.
---
Appendix
========
Additional points of improvement
--------------------------------
Ideas in random order that could be applied for improving the dataset. Some have been already
mentioned earlier.
* Recheck old conversations and consolidate them if short
+ Carefully watch for wrongly assigned character names and labels
* Rewrite scenario and personality to be more human-like and diverse
* Include the instruction at various locations in the conversation (e.g. at the end), or multiple times
+ May be performed just for "Scenario" to make the model learn to change the direction of the conversation
* Classify message "safety" (sexual/non-sexual, etc)
+ Perhaps using some external LLM and over a continuous range rather than just two discrete classifications
* Add more "Exaggerated" personalities
+ Anime/Japanese character tropes, etc.
* Include other information during finetuning
+ Inferred character motivations, psychology, notes, etc.
	+ Could also include summarized (word-list) traits in "Personality", turning Persona
	into a general "Description"
+ These could go in the system instruction in a way to boost the model's reasoning capabilities.
+ Chain-of-Thought-like indications in long-form descriptions:
"Since the character is/does X, he is Y", etc.
* Add focused RP-oriented small instruction dataset
+ Low amount of turns on very specific instructions
+ Greentext, logic, puzzles, etc.
* Truly randomized, gender-neutral names in the training data
	+ Could help avoid name bias
+ Could be implemented as an "Alternative" name field in the source files
+ An easy alternative would be instead simply using standardized names like USER and CHAR—never tried
with the full dataset.
* Use message length hints in 'tokens/10' when finetuning (see the sketch after this list)
	+ Could be more precise and easier to use than fuzzy lengths
* Include human training data from books in the form of "Questions and Answers" or interviews
* Make a greater use of what Markdown formatting offers, while maintaining the novel-style narration
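
To illustrate the 'tokens/10' idea above, here is a hypothetical sketch of deriving a length hint per message at build time; the tokenizer, the 'Length:' format and the helper name are assumptions, not part of LimaRP.

```python
from transformers import AutoTokenizer


def length_hint(message: str, tokenizer) -> str:
    """Bucket a message's length into tenths of tokens, e.g. 27 tokens -> 'Length: 3'."""
    n_tokens = len(tokenizer(message, add_special_tokens=False)["input_ids"])
    return f"Length: {max(1, round(n_tokens / 10))}"


# Usage with whichever tokenizer the finetune targets (the model choice is an assumption):
# tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
# hint = length_hint(utterance, tokenizer)
```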
| [
"### Notes\n\n\n* Be aware that although retrieved solely from age-restricted (18+) internet forums, the data contains\nroleplaying elements and topics that may be considered extreme, distasteful, shocking,\ninappropriate and disturbing. *Do not* download it if you're not sure of the legal ramifications of\npossessing fictional *written* content of *any* kind in your country.\n* The first ~500 samples were designed to be trained with a 2048 tokens context size; the following 500 with\na 4096 tokens context size or greater. The later training samples ('data-long') were designed for an 8192\ntokens context size. Note that while the 8k samples can be reduced to 4k size, this can confuse the model to\nsome extent, as scenario and persona data may end up referring to events removed from the context.\n* Please note that no support will be provided for the dataset and building script here on HuggingFace.\n* A persona–scenario summarizer/generator made with LimaRP data also exists.\n* The archive is password-protected. The password is 'LimaRP'\n\n\nKnown issues\n------------\n\n\nLimaRP has a few notable issues, here in subjective decreasing order of severity.\n\n\n* Grammar and typos. Although care has been put to reduce the amount of typos and grammatical errors,\nthey are still present to some extent (punctuation issues in particular). Automated AI-based grammar checking\nwith language models like CoEdit could be performed, but the\nresults would then have to be manually validated since these models often tend to correct more than necessary,\nwhich can be undesirable in dialogues, as well as being avoidable manual work. Some data sources (threads)\nshow a larger amount of grammatical issues than others, and for those this could be an acceptable tradeoff\nif they're worth saving.\n* Dullness. Overall the conversations may feel too polite or even dull in some aspects. This might be due to\nvarious reasons, but the main one is probably that most are from generally moderately well-written\n\"vanilla\" ERP where people try to be respectful with each other. More *noncon* and/or *extreme*\ncontent may be needed to reduce the general \"politeness\" of the conversational data, spice it up.\n* Compiling errors. While the provided script performs a certain amount of validation checks,\nthere may still be instances where due to human error utterances have been assigned the wrong label,\nor placeholder names been assigned to the wrong character. The former issue is more likely to have\nhappened in the first (4k-context) ~1000 training samples ('data-short'). The data needs to be carefully\nchecked to make sure that no issue in this regard exists.\n* Repetitive and inaccurate descriptions. While conversations are almost entirely human-generated,\ncharacter information and scenario exhibit 'gpt-4'-isms and can be repetitive, lack depth and miss certain character traits; manual\nediting will be needed to make them more human-like and respond to more specialized personality\ntraits and keywords—as a result, LimaRP-generated text may appear to ignore certain character traits.\nA more powerful personality summarizer capable of being both accurate while generating sufficiently\nlong descriptions could be conceived for solving this issue.\n* Lack of instructions. No instruction data whatsoever is present in the dataset. 
While the initial plan\nwas only making it focused on conversations, in retrospect a minimal amount of instruction-oriented roleplay data\ncould be beneficial in making the dataset able to better stand up on its own feet, without the need\nfor merging the data with smarter models or mixing it with external instruction datasets.\n* Name biases. Character names may need to be diversified to remove potentially undesirable bias. In other words,\ncertain names may have ended up getting associated with certain personalities since they have been used\nmore frequently than others.\n* Lack of diversity. In general, more focus needs to be put on improving conversation diversity. The total\nnumber of conversations may have been excessively padded up, as several long conversations that\ncouldn't fit within the 4k/8k tokens target have been split into multiple ones (on the other hand,\nPersona and Scenario data was never reused).\n* Poor dataset building script. The Python script for building the dataset, although working, is not great\nquality-wise and not particularly efficient.\n* Possible sources of impersonation. Several of the conversations in the 8k set feature participants consistently\nplaying the role of *two* characters at the same time. Character names in these files (which include the suffix '\\_MULTI'\nor '\\_GROUP' in the filename) have been assigned a name with the format 'Char1&Char2'. Testing didn't reveal issues\nwith this, but it's something to keep in mind if more severe impersonation problems occur compared to the initial\nrelease of LimaRP. Furthermore, in a few conversations additional characters (roleplayed by either of the two users)\nmay also temporarily participate to the story. These have often (but not always) been assigned a '\\_BAD' tag in the filename.\n* Gender confusion sources. Some conversations feature \"futanari\" or \"transgender\" content. These have been found\nto confuse small-scale models to a certain extent. All source files have a 'content' field and in most cases they\ncontain keywords like 'shemale', 'futa', 'futanari', 'trans', 'transgender' when relevant to assist filtering.\n\n\nConversation data form\n----------------------\n\n\nOnly one format has been used: forum/novel-style. This includes:\n\n\n* Quotation marks for dialogues;\n* Narration in third person, simple past form, without delimiters;\n\n\nOther RP styles have been excluded, and messages showing them have been fixed when possible and feasible.",
"### Format details\n\n\n* Narration does not have any delimiter.\n\t+ 'Jessica looked at Mark with disdain.'\n* Dialogues are enclosed with ASCII double quotation marks.\n\t+ '\"I say this.\"'\n* Onomatopoeias are enclosed with asterisks.\n\t+ '*thud*'\n* Inner thoughts are enclosed with underscores.\n\t+ '*What is he doing?*'\n* Non-dialogue quotes are enclosed with two apostrophes on each side (caveat: not all have been converted in this way).\n\t+ '''The Jungle Book'''\n* Punctuation has been normalized. Fancy quotes has been converted to the ASCII equivalent, ellipses always\nturned into a standard format ('...' with a trailing space when a word follows) and em-dashes always converted\nto three consecutive dashes ('---') without any surrounding space.\n\t+ For stylistic reasons, when building the dataset em-dash surrogates get converted to their UTF-8 symbol ('—').\n* Placeholder names have been used for the characters, even within the messages, whenever possible. '' is always\nassumed to be the bot/model, and '' always assumed to be the human/user. All conversations terminate with\na message by ''.\n\n\t+ When building the dataset, placeholder names currently get converted to the ones actually used in\n\tthe RP conversations.\n\n\nData sources\n------------\n\n\nWeights are naively calculated in terms of bytes for the entire conversation files as of 2023-11-10.\n\n\n\nNote that users are required to be 18+ to write in the listed ERP forums or forum subsections.\n\n\nUsernames, OOC and other personal information have not been included in the training data, only the\nnames of the roleplayed characters as used in the conversations (or sometimes with minor changes).\n\n\nSome lessons learned while making LimaRP\n----------------------------------------\n\n\n* There is indeed no real need for a large amount of data to give models a strong bias towards producing\nroleplaying text with good reliability in respecting user/bot turns. The 2000 training rows could even be\ntrimmed in number with likely similar results.\n* Incidentally, roughly 2000 training examples are also about the limit that can still be finetuned in\nreasonable amounts of time on a single 24GB consumer GPU at a decent context length (in the case of\n13B models).\n* Data quality beats quantity, but ensuring good quality is very difficult without the help of unrestricted\npowerful LLMs and/or extensive human intervention.\n* Remaining focused on a strict set of rules with a clear goal and consistently adding a relatively small\nnumber of training examples on a daily basis were a driving force for completing the dataset.\n* In retrospect, the RP conversation processing pipeline could have been improved and better planned, or made\nmore efficient.\n* Finetuning the dataset in \"completion\" mode (on the entire chat) may have been part of the reasons why\nit was so effective in altering model outputs, whether finetuned from scratch or applied as a LoRA.\n* The initial GPT-3.5/4 \"seed\" data for generating Persona and Scenario was crucial in the initial stages\nof the project, but once a dedicated summarizer was trained from that data, access to more powerful LLMs\nbecame for the most part unnecessary.\n* Compiling and verifying the data manually took much more work than anticipated. 
This should have been\na group effort.\n* In the end, advances in base model performance, increases in context size (so far from 2k tokens of\nthe original LLaMA to 8k tokens or more of Mistral and other recent models) and improvements in ICL\n(in-context learning) capabilities may eventually render finetuning on datasets like LimaRP unnecessary\nexcept for zero-shot RP performance or cases where models have a strong built-in alignment preventing ERP.\n\n\n\n\n---\n\n\nAppendix\n========\n\n\nAdditional points of improvement\n--------------------------------\n\n\nIdeas in random order that could be applied for improving the dataset. Some have been already\nmentioned earlier.\n\n\n* Recheck old conversations and consolidate them if short\n\t+ Carefully watch for wrongly assigned character names and labels\n* Rewrite scenario and personality to be more human-like and diverse\n* Include the instruction at various locations in the conversation (e.g. at the end), or multiple times\n\t+ May be performed just for \"Scenario\" to make the model learn to change the direction of the conversation\n* Classify message \"safety\" (sexual/non-sexual, etc)\n\t+ Perhaps using some external LLM and over a continuous range rather than just two discrete classifications\n* Add more \"Exaggerated\" personalities\n\t+ Anime/Japanese character tropes, etc.\n* Include other information during finetuning\n\t+ Inferred character motivations, psychology, notes, etc.\n\t+ Could also include summarized (word list) traits in \"Personality\", making Persona\n\tas a general \"Description\"\n\t+ These could go in the system instruction in a way to boost the model's reasoning capabilities.\n\t+ Chain-of-Thought-like indications in long-form descriptions:\n\t\"Since the character is/does X, he is Y\", etc.\n* Add focused RP-oriented small instruction dataset\n\t+ Low amount of turns on very specific instructions\n\t+ Greentext, logic, puzzles, etc.\n* Truly randomized, gender-neutral names in the training data\n\t+ Could avoid avoid name bias\n\t+ Could be implemented as an \"Alternative\" name field in the source files\n\t+ An easy alternative would be instead simply using standardized names like USER and CHAR—never tried\n\twith the full dataset.\n* Use when finetuning message length hints in 'tokens/10'\n\t+ Could be more precise and easier to use than fuzzy lengths\n* Include human training data from books in the form of \"Questions and Answers\" or interviews\n* Make a greater use of what Markdown formatting offers, while maintaining the novel-style narration"
]
| [
"TAGS\n#task_categories-conversational #task_categories-summarization #size_categories-1K<n<10K #language-English #license-apache-2.0 #not-for-all-audiences #region-us \n",
"### Notes\n\n\n* Be aware that although retrieved solely from age-restricted (18+) internet forums, the data contains\nroleplaying elements and topics that may be considered extreme, distasteful, shocking,\ninappropriate and disturbing. *Do not* download it if you're not sure of the legal ramifications of\npossessing fictional *written* content of *any* kind in your country.\n* The first ~500 samples were designed to be trained with a 2048 tokens context size; the following 500 with\na 4096 tokens context size or greater. The later training samples ('data-long') were designed for an 8192\ntokens context size. Note that while the 8k samples can be reduced to 4k size, this can confuse the model to\nsome extent, as scenario and persona data may end up referring to events removed from the context.\n* Please note that no support will be provided for the dataset and building script here on HuggingFace.\n* A persona–scenario summarizer/generator made with LimaRP data also exists.\n* The archive is password-protected. The password is 'LimaRP'\n\n\nKnown issues\n------------\n\n\nLimaRP has a few notable issues, here in subjective decreasing order of severity.\n\n\n* Grammar and typos. Although care has been put to reduce the amount of typos and grammatical errors,\nthey are still present to some extent (punctuation issues in particular). Automated AI-based grammar checking\nwith language models like CoEdit could be performed, but the\nresults would then have to be manually validated since these models often tend to correct more than necessary,\nwhich can be undesirable in dialogues, as well as being avoidable manual work. Some data sources (threads)\nshow a larger amount of grammatical issues than others, and for those this could be an acceptable tradeoff\nif they're worth saving.\n* Dullness. Overall the conversations may feel too polite or even dull in some aspects. This might be due to\nvarious reasons, but the main one is probably that most are from generally moderately well-written\n\"vanilla\" ERP where people try to be respectful with each other. More *noncon* and/or *extreme*\ncontent may be needed to reduce the general \"politeness\" of the conversational data, spice it up.\n* Compiling errors. While the provided script performs a certain amount of validation checks,\nthere may still be instances where due to human error utterances have been assigned the wrong label,\nor placeholder names been assigned to the wrong character. The former issue is more likely to have\nhappened in the first (4k-context) ~1000 training samples ('data-short'). The data needs to be carefully\nchecked to make sure that no issue in this regard exists.\n* Repetitive and inaccurate descriptions. While conversations are almost entirely human-generated,\ncharacter information and scenario exhibit 'gpt-4'-isms and can be repetitive, lack depth and miss certain character traits; manual\nediting will be needed to make them more human-like and respond to more specialized personality\ntraits and keywords—as a result, LimaRP-generated text may appear to ignore certain character traits.\nA more powerful personality summarizer capable of being both accurate while generating sufficiently\nlong descriptions could be conceived for solving this issue.\n* Lack of instructions. No instruction data whatsoever is present in the dataset. 
While the initial plan\nwas only making it focused on conversations, in retrospect a minimal amount of instruction-oriented roleplay data\ncould be beneficial in making the dataset able to better stand up on its own feet, without the need\nfor merging the data with smarter models or mixing it with external instruction datasets.\n* Name biases. Character names may need to be diversified to remove potentially undesirable bias. In other words,\ncertain names may have ended up getting associated with certain personalities since they have been used\nmore frequently than others.\n* Lack of diversity. In general, more focus needs to be put on improving conversation diversity. The total\nnumber of conversations may have been excessively padded up, as several long conversations that\ncouldn't fit within the 4k/8k tokens target have been split into multiple ones (on the other hand,\nPersona and Scenario data was never reused).\n* Poor dataset building script. The Python script for building the dataset, although working, is not great\nquality-wise and not particularly efficient.\n* Possible sources of impersonation. Several of the conversations in the 8k set feature participants consistently\nplaying the role of *two* characters at the same time. Character names in these files (which include the suffix '\\_MULTI'\nor '\\_GROUP' in the filename) have been assigned a name with the format 'Char1&Char2'. Testing didn't reveal issues\nwith this, but it's something to keep in mind if more severe impersonation problems occur compared to the initial\nrelease of LimaRP. Furthermore, in a few conversations additional characters (roleplayed by either of the two users)\nmay also temporarily participate to the story. These have often (but not always) been assigned a '\\_BAD' tag in the filename.\n* Gender confusion sources. Some conversations feature \"futanari\" or \"transgender\" content. These have been found\nto confuse small-scale models to a certain extent. All source files have a 'content' field and in most cases they\ncontain keywords like 'shemale', 'futa', 'futanari', 'trans', 'transgender' when relevant to assist filtering.\n\n\nConversation data form\n----------------------\n\n\nOnly one format has been used: forum/novel-style. This includes:\n\n\n* Quotation marks for dialogues;\n* Narration in third person, simple past form, without delimiters;\n\n\nOther RP styles have been excluded, and messages showing them have been fixed when possible and feasible.",
"### Format details\n\n\n* Narration does not have any delimiter.\n\t+ 'Jessica looked at Mark with disdain.'\n* Dialogues are enclosed with ASCII double quotation marks.\n\t+ '\"I say this.\"'\n* Onomatopoeias are enclosed with asterisks.\n\t+ '*thud*'\n* Inner thoughts are enclosed with underscores.\n\t+ '*What is he doing?*'\n* Non-dialogue quotes are enclosed with two apostrophes on each side (caveat: not all have been converted in this way).\n\t+ '''The Jungle Book'''\n* Punctuation has been normalized. Fancy quotes has been converted to the ASCII equivalent, ellipses always\nturned into a standard format ('...' with a trailing space when a word follows) and em-dashes always converted\nto three consecutive dashes ('---') without any surrounding space.\n\t+ For stylistic reasons, when building the dataset em-dash surrogates get converted to their UTF-8 symbol ('—').\n* Placeholder names have been used for the characters, even within the messages, whenever possible. '' is always\nassumed to be the bot/model, and '' always assumed to be the human/user. All conversations terminate with\na message by ''.\n\n\t+ When building the dataset, placeholder names currently get converted to the ones actually used in\n\tthe RP conversations.\n\n\nData sources\n------------\n\n\nWeights are naively calculated in terms of bytes for the entire conversation files as of 2023-11-10.\n\n\n\nNote that users are required to be 18+ to write in the listed ERP forums or forum subsections.\n\n\nUsernames, OOC and other personal information have not been included in the training data, only the\nnames of the roleplayed characters as used in the conversations (or sometimes with minor changes).\n\n\nSome lessons learned while making LimaRP\n----------------------------------------\n\n\n* There is indeed no real need for a large amount of data to give models a strong bias towards producing\nroleplaying text with good reliability in respecting user/bot turns. The 2000 training rows could even be\ntrimmed in number with likely similar results.\n* Incidentally, roughly 2000 training examples are also about the limit that can still be finetuned in\nreasonable amounts of time on a single 24GB consumer GPU at a decent context length (in the case of\n13B models).\n* Data quality beats quantity, but ensuring good quality is very difficult without the help of unrestricted\npowerful LLMs and/or extensive human intervention.\n* Remaining focused on a strict set of rules with a clear goal and consistently adding a relatively small\nnumber of training examples on a daily basis were a driving force for completing the dataset.\n* In retrospect, the RP conversation processing pipeline could have been improved and better planned, or made\nmore efficient.\n* Finetuning the dataset in \"completion\" mode (on the entire chat) may have been part of the reasons why\nit was so effective in altering model outputs, whether finetuned from scratch or applied as a LoRA.\n* The initial GPT-3.5/4 \"seed\" data for generating Persona and Scenario was crucial in the initial stages\nof the project, but once a dedicated summarizer was trained from that data, access to more powerful LLMs\nbecame for the most part unnecessary.\n* Compiling and verifying the data manually took much more work than anticipated. 
This should have been\na group effort.\n* In the end, advances in base model performance, increases in context size (so far from 2k tokens of\nthe original LLaMA to 8k tokens or more of Mistral and other recent models) and improvements in ICL\n(in-context learning) capabilities may eventually render finetuning on datasets like LimaRP unnecessary\nexcept for zero-shot RP performance or cases where models have a strong built-in alignment preventing ERP.\n\n\n\n\n---\n\n\nAppendix\n========\n\n\nAdditional points of improvement\n--------------------------------\n\n\nIdeas in random order that could be applied for improving the dataset. Some have been already\nmentioned earlier.\n\n\n* Recheck old conversations and consolidate them if short\n\t+ Carefully watch for wrongly assigned character names and labels\n* Rewrite scenario and personality to be more human-like and diverse\n* Include the instruction at various locations in the conversation (e.g. at the end), or multiple times\n\t+ May be performed just for \"Scenario\" to make the model learn to change the direction of the conversation\n* Classify message \"safety\" (sexual/non-sexual, etc)\n\t+ Perhaps using some external LLM and over a continuous range rather than just two discrete classifications\n* Add more \"Exaggerated\" personalities\n\t+ Anime/Japanese character tropes, etc.\n* Include other information during finetuning\n\t+ Inferred character motivations, psychology, notes, etc.\n\t+ Could also include summarized (word list) traits in \"Personality\", making Persona\n\tas a general \"Description\"\n\t+ These could go in the system instruction in a way to boost the model's reasoning capabilities.\n\t+ Chain-of-Thought-like indications in long-form descriptions:\n\t\"Since the character is/does X, he is Y\", etc.\n* Add focused RP-oriented small instruction dataset\n\t+ Low amount of turns on very specific instructions\n\t+ Greentext, logic, puzzles, etc.\n* Truly randomized, gender-neutral names in the training data\n\t+ Could avoid avoid name bias\n\t+ Could be implemented as an \"Alternative\" name field in the source files\n\t+ An easy alternative would be instead simply using standardized names like USER and CHAR—never tried\n\twith the full dataset.\n* Use when finetuning message length hints in 'tokens/10'\n\t+ Could be more precise and easier to use than fuzzy lengths\n* Include human training data from books in the form of \"Questions and Answers\" or interviews\n* Make a greater use of what Markdown formatting offers, while maintaining the novel-style narration"
]
| [
59,
1355,
1363
]
| [
"passage: TAGS\n#task_categories-conversational #task_categories-summarization #size_categories-1K<n<10K #language-English #license-apache-2.0 #not-for-all-audiences #region-us \n",
"passage: ### Notes\n\n\n* Be aware that although retrieved solely from age-restricted (18+) internet forums, the data contains\nroleplaying elements and topics that may be considered extreme, distasteful, shocking,\ninappropriate and disturbing. *Do not* download it if you're not sure of the legal ramifications of\npossessing fictional *written* content of *any* kind in your country.\n* The first ~500 samples were designed to be trained with a 2048 tokens context size; the following 500 with\na 4096 tokens context size or greater. The later training samples ('data-long') were designed for an 8192\ntokens context size. Note that while the 8k samples can be reduced to 4k size, this can confuse the model to\nsome extent, as scenario and persona data may end up referring to events removed from the context.\n* Please note that no support will be provided for the dataset and building script here on HuggingFace.\n* A persona–scenario summarizer/generator made with LimaRP data also exists.\n* The archive is password-protected. The password is 'LimaRP'\n\n\nKnown issues\n------------\n\n\nLimaRP has a few notable issues, here in subjective decreasing order of severity.\n\n\n* Grammar and typos. Although care has been put to reduce the amount of typos and grammatical errors,\nthey are still present to some extent (punctuation issues in particular). Automated AI-based grammar checking\nwith language models like CoEdit could be performed, but the\nresults would then have to be manually validated since these models often tend to correct more than necessary,\nwhich can be undesirable in dialogues, as well as being avoidable manual work. Some data sources (threads)\nshow a larger amount of grammatical issues than others, and for those this could be an acceptable tradeoff\nif they're worth saving.\n* Dullness. Overall the conversations may feel too polite or even dull in some aspects. This might be due to\nvarious reasons, but the main one is probably that most are from generally moderately well-written\n\"vanilla\" ERP where people try to be respectful with each other. More *noncon* and/or *extreme*\ncontent may be needed to reduce the general \"politeness\" of the conversational data, spice it up.\n* Compiling errors. While the provided script performs a certain amount of validation checks,\nthere may still be instances where due to human error utterances have been assigned the wrong label,\nor placeholder names been assigned to the wrong character. The former issue is more likely to have\nhappened in the first (4k-context) ~1000 training samples ('data-short'). The data needs to be carefully\nchecked to make sure that no issue in this regard exists.\n* Repetitive and inaccurate descriptions. While conversations are almost entirely human-generated,\ncharacter information and scenario exhibit 'gpt-4'-isms and can be repetitive, lack depth and miss certain character traits; manual\nediting will be needed to make them more human-like and respond to more specialized personality\ntraits and keywords—as a result, LimaRP-generated text may appear to ignore certain character traits.\nA more powerful personality summarizer capable of being both accurate while generating sufficiently\nlong descriptions could be conceived for solving this issue.\n* Lack of instructions. No instruction data whatsoever is present in the dataset. 
While the initial plan\nwas only making it focused on conversations, in retrospect a minimal amount of instruction-oriented roleplay data\ncould be beneficial in making the dataset able to better stand up on its own feet, without the need\nfor merging the data with smarter models or mixing it with external instruction datasets.\n* Name biases. Character names may need to be diversified to remove potentially undesirable bias. In other words,\ncertain names may have ended up getting associated with certain personalities since they have been used\nmore frequently than others.\n* Lack of diversity. In general, more focus needs to be put on improving conversation diversity. The total\nnumber of conversations may have been excessively padded up, as several long conversations that\ncouldn't fit within the 4k/8k tokens target have been split into multiple ones (on the other hand,\nPersona and Scenario data was never reused).\n* Poor dataset building script. The Python script for building the dataset, although working, is not great\nquality-wise and not particularly efficient.\n* Possible sources of impersonation. Several of the conversations in the 8k set feature participants consistently\nplaying the role of *two* characters at the same time. Character names in these files (which include the suffix '\\_MULTI'\nor '\\_GROUP' in the filename) have been assigned a name with the format 'Char1&Char2'. Testing didn't reveal issues\nwith this, but it's something to keep in mind if more severe impersonation problems occur compared to the initial\nrelease of LimaRP. Furthermore, in a few conversations additional characters (roleplayed by either of the two users)\nmay also temporarily participate to the story. These have often (but not always) been assigned a '\\_BAD' tag in the filename.\n* Gender confusion sources. Some conversations feature \"futanari\" or \"transgender\" content. These have been found\nto confuse small-scale models to a certain extent. All source files have a 'content' field and in most cases they\ncontain keywords like 'shemale', 'futa', 'futanari', 'trans', 'transgender' when relevant to assist filtering.\n\n\nConversation data form\n----------------------\n\n\nOnly one format has been used: forum/novel-style. This includes:\n\n\n* Quotation marks for dialogues;\n* Narration in third person, simple past form, without delimiters;\n\n\nOther RP styles have been excluded, and messages showing them have been fixed when possible and feasible."
]
|
e914e440e992cb8e7b87062ef95bbae984bd22ba |
# LLaVA-Plus Instructed Dataset Card
## Dataset details
**Dataset type:**
LLaVA-Plus-v1-117K is a set of GPT-generated multimodal tool-augmented instruction-following data.
It is constructed for tool use to build large multimodal agents with GPT-4-plus vision/language capability.
**Dataset date:**
LLaVA-Plus-v1-117K was collected in September 2023 by prompting the ChatGPT/GPT-4-0314 API.
**Paper or resources for more information:**
https://llava-vl.github.io/llava-plus
**License:**
Attribution-NonCommercial 4.0 International
It should abide by the policy of OpenAI: https://openai.com/policies/terms-of-use
**Where to send questions or comments about the model:**
https://github.com/LLaVA-VL/LLaVA-Plus-Codebase/issues
## Intended use
**Primary intended uses:**
The primary use of LLaVA-Plus is research on large multimodal agents and chatbots.
**Primary intended users:**
The primary intended users of the model are researchers and hobbyists in computer vision, natural language processing, machine learning, and artificial intelligence. | LLaVA-VL/llava-plus-data | [
"task_categories:visual-question-answering",
"task_categories:question-answering",
"size_categories:100K<n<1M",
"language:en",
"license:cc-by-nc-4.0",
"region:us"
]
| 2023-11-09T15:47:07+00:00 | {"language": ["en"], "license": "cc-by-nc-4.0", "size_categories": ["100K<n<1M"], "task_categories": ["visual-question-answering", "question-answering"], "pretty_name": "LLaVA-Plus-v1-117K"} | 2023-11-09T16:04:57+00:00 | []
| [
"en"
]
| TAGS
#task_categories-visual-question-answering #task_categories-question-answering #size_categories-100K<n<1M #language-English #license-cc-by-nc-4.0 #region-us
|
# LLaVA-Plus Instructed Dataset Card
## Dataset details
Dataset type:
LLaVA-Plus-v1-117K is a set of GPT-generated multimodal tool-augmented instruction-following data.
It is constructed for tool use to build large multimodal agents with GPT-4-plus vision/language capability.
Dataset date:
LLaVA-Plus-v1-117K was collected in Sep 2023, by prompting ChatGPT/GPT-4-0314 API.
Paper or resources for more information:
URL
License:
Attribution-NonCommercial 4.0 International
It should abide by the policy of OpenAI: URL
Where to send questions or comments about the model:
URL
## Intended use
Primary intended uses:
The primary use of LLaVA-Plus is research on large multimodal agents, and chatbots.
Primary intended users:
The primary intended users of the model are researchers and hobbyists in computer vision, natural language processing, machine learning, and artificial intelligence. | [
"# LLaVA-Plus Instructed Dataset Card",
"## Dataset details\n\nDataset type:\nLLaVA-Plus-v1-117K is a set of GPT-generated multimodal tool-augmented instruction-following data.\nIt is constructed for tool use to build large multimodal agents with GPT-4-plus vision/language capability.\n\nDataset date:\nLLaVA-Plus-v1-117K was collected in Sep 2023, by prompting ChatGPT/GPT-4-0314 API.\n\nPaper or resources for more information:\nURL\n\nLicense:\nAttribution-NonCommercial 4.0 International\nIt should abide by the policy of OpenAI: URL\n\nWhere to send questions or comments about the model:\nURL",
"## Intended use\nPrimary intended uses:\nThe primary use of LLaVA-Plus is research on large multimodal agents, and chatbots.\n\nPrimary intended users:\nThe primary intended users of the model are researchers and hobbyists in computer vision, natural language processing, machine learning, and artificial intelligence."
]
| [
"TAGS\n#task_categories-visual-question-answering #task_categories-question-answering #size_categories-100K<n<1M #language-English #license-cc-by-nc-4.0 #region-us \n",
"# LLaVA-Plus Instructed Dataset Card",
"## Dataset details\n\nDataset type:\nLLaVA-Plus-v1-117K is a set of GPT-generated multimodal tool-augmented instruction-following data.\nIt is constructed for tool use to build large multimodal agents with GPT-4-plus vision/language capability.\n\nDataset date:\nLLaVA-Plus-v1-117K was collected in Sep 2023, by prompting ChatGPT/GPT-4-0314 API.\n\nPaper or resources for more information:\nURL\n\nLicense:\nAttribution-NonCommercial 4.0 International\nIt should abide by the policy of OpenAI: URL\n\nWhere to send questions or comments about the model:\nURL",
"## Intended use\nPrimary intended uses:\nThe primary use of LLaVA-Plus is research on large multimodal agents, and chatbots.\n\nPrimary intended users:\nThe primary intended users of the model are researchers and hobbyists in computer vision, natural language processing, machine learning, and artificial intelligence."
]
| [
60,
11,
144,
69
]
| [
"passage: TAGS\n#task_categories-visual-question-answering #task_categories-question-answering #size_categories-100K<n<1M #language-English #license-cc-by-nc-4.0 #region-us \n# LLaVA-Plus Instructed Dataset Card## Dataset details\n\nDataset type:\nLLaVA-Plus-v1-117K is a set of GPT-generated multimodal tool-augmented instruction-following data.\nIt is constructed for tool use to build large multimodal agents with GPT-4-plus vision/language capability.\n\nDataset date:\nLLaVA-Plus-v1-117K was collected in Sep 2023, by prompting ChatGPT/GPT-4-0314 API.\n\nPaper or resources for more information:\nURL\n\nLicense:\nAttribution-NonCommercial 4.0 International\nIt should abide by the policy of OpenAI: URL\n\nWhere to send questions or comments about the model:\nURL## Intended use\nPrimary intended uses:\nThe primary use of LLaVA-Plus is research on large multimodal agents, and chatbots.\n\nPrimary intended users:\nThe primary intended users of the model are researchers and hobbyists in computer vision, natural language processing, machine learning, and artificial intelligence."
]
|
87f62766d3e3e96615edf76161d93eba1677c371 | # Dataset Card for "bw_spec_cls_80_29"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | arieg/bw_spec_cls_80_29 | [
"region:us"
]
| 2023-11-09T15:48:51+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "67784", "1": "67793", "2": "67829", "3": "68353", "4": "68354", "5": "68355", "6": "68356", "7": "68407", "8": "68410", "9": "68444", "10": "68531", "11": "68539", "12": "68540", "13": "68541", "14": "68543", "15": "68549", "16": "68573", "17": "68579", "18": "68592", "19": "68600", "20": "68601", "21": "68680", "22": "68682", "23": "68683", "24": "68820", "25": "68821", "26": "68837", "27": "68838", "28": "68839", "29": "68840", "30": "68841", "31": "68842", "32": "68843", "33": "68844", "34": "68851", "35": "68852", "36": "68853", "37": "68854", "38": "68860", "39": "68861", "40": "68862", "41": "68869", "42": "68872", "43": "68875", "44": "69001", "45": "69002", "46": "69170", "47": "69181", "48": "69182", "49": "69188", "50": "69193", "51": "69195", "52": "69196", "53": "69197", "54": "69198", "55": "69199", "56": "69200", "57": "69201", "58": "69202", "59": "69203", "60": "69204", "61": "69205", "62": "69206", "63": "69207", "64": "69208", "65": "69209", "66": "69210", "67": "69211", "68": "69554", "69": "69555", "70": "69561", "71": "69563", "72": "69564", "73": "69567", "74": "69682", "75": "69723", "76": "69726", "77": "69727", "78": "69732", "79": "69744"}}}}], "splits": [{"name": "train", "num_bytes": 88025524.8, "num_examples": 1600}, {"name": "test", "num_bytes": 21927703.0, "num_examples": 400}], "download_size": 109110671, "dataset_size": 109953227.8}} | 2023-11-09T15:49:08+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "bw_spec_cls_80_29"
More Information needed | [
"# Dataset Card for \"bw_spec_cls_80_29\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"bw_spec_cls_80_29\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"bw_spec_cls_80_29\"\n\nMore Information needed"
]
|
59f1da373241006643e3cd0ff99f5ca38f2a4aad | # Dataset Card for "Extractive-QA-type-2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | nlplabtdtu/Extractive-QA-type-2 | [
"region:us"
]
| 2023-11-09T15:49:03+00:00 | {"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answers", "struct": [{"name": "answer_start", "sequence": "int64"}, {"name": "text", "sequence": "string"}]}, {"name": "is_impossible", "dtype": "bool"}, {"name": "instruction", "dtype": "string"}, {"name": "prompt_name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 22373994, "num_examples": 9217}], "download_size": 5375276, "dataset_size": 22373994}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-09T15:49:04+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "Extractive-QA-type-2"
More Information needed | [
"# Dataset Card for \"Extractive-QA-type-2\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"Extractive-QA-type-2\"\n\nMore Information needed"
]
| [
6,
18
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"Extractive-QA-type-2\"\n\nMore Information needed"
]
|
40992471f728d1f859ff3ff2ddd22d79d2aa8570 | # Dataset Card for "SWE-bench__style-3__fs-oracle"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | feedback-to-code/SWE-bench__style-3__fs-oracle | [
"region:us"
]
| 2023-11-09T16:04:26+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "instance_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "repo", "dtype": "string"}, {"name": "base_commit", "dtype": "string"}, {"name": "problem_statement", "dtype": "string"}, {"name": "hints_text", "dtype": "string"}, {"name": "created_at", "dtype": "timestamp[us]"}, {"name": "patch", "dtype": "string"}, {"name": "test_patch", "dtype": "string"}, {"name": "version", "dtype": "string"}, {"name": "FAIL_TO_PASS", "dtype": "string"}, {"name": "PASS_TO_PASS", "dtype": "string"}, {"name": "environment_setup_commit", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2791520.0, "num_examples": 40}, {"name": "validation", "num_bytes": 25060, "num_examples": 1}], "download_size": 999494, "dataset_size": 2816580.0}} | 2023-11-09T16:04:30+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "SWE-bench__style-3__fs-oracle"
More Information needed | [
"# Dataset Card for \"SWE-bench__style-3__fs-oracle\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"SWE-bench__style-3__fs-oracle\"\n\nMore Information needed"
]
| [
6,
23
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"SWE-bench__style-3__fs-oracle\"\n\nMore Information needed"
]
|
a65c3b128dfdd070eacb9b185619330a699b011b | # Dataset Card for "lsc_multiplechoice"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | tomashs/lsc_multiplechoice | [
"region:us"
]
| 2023-11-09T16:09:25+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "val", "path": "data/val-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "short_form", "dtype": "string"}, {"name": "long_form", "dtype": "string"}, {"name": "freq", "dtype": "int64"}, {"name": "num_candidates", "dtype": "int64"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 36335092, "num_examples": 110752}, {"name": "val", "num_bytes": 7920458, "num_examples": 25932}, {"name": "test", "num_bytes": 8281205, "num_examples": 25175}], "download_size": 24878537, "dataset_size": 52536755}} | 2023-11-09T16:09:39+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "lsc_multiplechoice"
More Information needed | [
"# Dataset Card for \"lsc_multiplechoice\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"lsc_multiplechoice\"\n\nMore Information needed"
]
| [
6,
17
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"lsc_multiplechoice\"\n\nMore Information needed"
]
|
ca5363194921b4007ab5bce13ee31cf08e130e0b | # Dataset Card for "JimmyLuAugRestChat"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | bigheiniuJ/JimmyLuAugRestChat | [
"region:us"
]
| 2023-11-09T16:12:57+00:00 | {"dataset_info": {"features": [{"name": "output", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "seed", "dtype": "string"}, {"name": "split", "dtype": "string"}, {"name": "task", "dtype": "string"}, {"name": "options", "sequence": "string"}, {"name": "id", "dtype": "int64"}, {"name": "aug_type", "dtype": "string"}, {"name": "aug_time", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 71287951, "num_examples": 143531}], "download_size": 11818939, "dataset_size": 71287951}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-09T16:12:58+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "JimmyLuAugRestChat"
More Information needed | [
"# Dataset Card for \"JimmyLuAugRestChat\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"JimmyLuAugRestChat\"\n\nMore Information needed"
]
| [
6,
18
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"JimmyLuAugRestChat\"\n\nMore Information needed"
]
|
4eba85ec7f56f49de562089b1d623dd7d83cbb25 | # Dataset Card for "bw_spec_cls_80_30"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | arieg/bw_spec_cls_80_30 | [
"region:us"
]
| 2023-11-09T16:14:30+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "69745", "1": "69746", "2": "69747", "3": "69761", "4": "69762", "5": "69763", "6": "69764", "7": "69765", "8": "69766", "9": "69767", "10": "69768", "11": "69781", "12": "69784", "13": "69785", "14": "69787", "15": "69788", "16": "69789", "17": "69791", "18": "69792", "19": "69793", "20": "69798", "21": "69822", "22": "69823", "23": "69824", "24": "69825", "25": "69826", "26": "69827", "27": "69828", "28": "69830", "29": "69833", "30": "69904", "31": "70002", "32": "70005", "33": "70174", "34": "70206", "35": "70207", "36": "70208", "37": "70402", "38": "70409", "39": "70654", "40": "70655", "41": "70657", "42": "70660", "43": "70813", "44": "70873", "45": "70875", "46": "70878", "47": "70879", "48": "71096", "49": "71133", "50": "71157", "51": "71158", "52": "71172", "53": "71173", "54": "71174", "55": "71175", "56": "71216", "57": "71225", "58": "71228", "59": "71230", "60": "71231", "61": "71240", "62": "71241", "63": "71242", "64": "71243", "65": "71244", "66": "71245", "67": "71246", "68": "71247", "69": "71248", "70": "71249", "71": "71250", "72": "71251", "73": "71252", "74": "71253", "75": "71254", "76": "71255", "77": "71276", "78": "71507", "79": "71508"}}}}], "splits": [{"name": "train", "num_bytes": 86041601.6, "num_examples": 1600}, {"name": "test", "num_bytes": 21409325.0, "num_examples": 400}], "download_size": 106908531, "dataset_size": 107450926.6}} | 2023-11-09T16:14:47+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "bw_spec_cls_80_30"
More Information needed | [
"# Dataset Card for \"bw_spec_cls_80_30\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"bw_spec_cls_80_30\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"bw_spec_cls_80_30\"\n\nMore Information needed"
]
|
21584bc7df9621ec9b93b033e301dc7468d52882 | # Dataset Card for "zephyr-7b-beta-judgelm"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | alvarobartt/zephyr-7b-beta-judgelm | [
"region:us"
]
| 2023-11-09T16:23:34+00:00 | {"dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "generation_prompt", "dtype": "string"}, {"name": "raw_generation_responses", "sequence": "string"}, {"name": "generations", "sequence": "string"}, {"name": "labelling_prompt", "list": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}, {"name": "raw_labelling_response", "struct": [{"name": "choices", "list": [{"name": "finish_reason", "dtype": "string"}, {"name": "index", "dtype": "int64"}, {"name": "message", "struct": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}]}, {"name": "created", "dtype": "int64"}, {"name": "id", "dtype": "string"}, {"name": "model", "dtype": "string"}, {"name": "object", "dtype": "string"}, {"name": "usage", "struct": [{"name": "completion_tokens", "dtype": "int64"}, {"name": "prompt_tokens", "dtype": "int64"}, {"name": "total_tokens", "dtype": "int64"}]}]}, {"name": "ratings", "sequence": "int64"}, {"name": "rationale", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1409108, "num_examples": 100}], "download_size": 455420, "dataset_size": 1409108}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-09T16:23:44+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "zephyr-7b-beta-judgelm"
More Information needed | [
"# Dataset Card for \"zephyr-7b-beta-judgelm\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"zephyr-7b-beta-judgelm\"\n\nMore Information needed"
]
| [
6,
21
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"zephyr-7b-beta-judgelm\"\n\nMore Information needed"
]
|
3a87bce92c2c39ec8ef044ef6ab5e49d1f863056 | # Dataset Card for "pubchem_enamine_standardized"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | phanvancongthanh/data_standardized | [
"region:us"
]
| 2023-11-09T16:33:20+00:00 | {"dataset_info": {"features": [{"name": "smiles", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 0, "num_examples": 0}], "download_size": 548, "dataset_size": 0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-16T08:11:40+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "pubchem_enamine_standardized"
More Information needed | [
"# Dataset Card for \"pubchem_enamine_standardized\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"pubchem_enamine_standardized\"\n\nMore Information needed"
]
| [
6,
18
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"pubchem_enamine_standardized\"\n\nMore Information needed"
]
|
7e59cc83c231a7aa22f7f51bcd9135f67563c091 | # Dataset Card for "bw_spec_cls_80_31"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | arieg/bw_spec_cls_80_31 | [
"region:us"
]
| 2023-11-09T16:40:38+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "71509", "1": "71510", "2": "71511", "3": "71512", "4": "71513", "5": "71514", "6": "71515", "7": "71516", "8": "71617", "9": "71620", "10": "71622", "11": "71690", "12": "71691", "13": "71692", "14": "71693", "15": "71694", "16": "71695", "17": "71709", "18": "71714", "19": "71715", "20": "71719", "21": "71721", "22": "71822", "23": "71884", "24": "71885", "25": "71937", "26": "71938", "27": "72046", "28": "72047", "29": "72050", "30": "72056", "31": "72058", "32": "72059", "33": "72064", "34": "72067", "35": "72068", "36": "72069", "37": "72070", "38": "72071", "39": "72072", "40": "72073", "41": "72074", "42": "72075", "43": "72076", "44": "72129", "45": "72130", "46": "72131", "47": "72134", "48": "72135", "49": "72136", "50": "72146", "51": "72149", "52": "72200", "53": "72206", "54": "72210", "55": "72215", "56": "72232", "57": "72233", "58": "72234", "59": "72287", "60": "72288", "61": "72289", "62": "72290", "63": "72456", "64": "72468", "65": "72476", "66": "72477", "67": "72562", "68": "72565", "69": "72570", "70": "72604", "71": "72605", "72": "72607", "73": "72612", "74": "72738", "75": "72781", "76": "72782", "77": "72783", "78": "72784", "79": "72785"}}}}], "splits": [{"name": "train", "num_bytes": 85390192.0, "num_examples": 1600}, {"name": "test", "num_bytes": 21745201.0, "num_examples": 400}], "download_size": 108634813, "dataset_size": 107135393.0}} | 2023-11-09T16:40:54+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "bw_spec_cls_80_31"
More Information needed | [
"# Dataset Card for \"bw_spec_cls_80_31\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"bw_spec_cls_80_31\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"bw_spec_cls_80_31\"\n\nMore Information needed"
]
|
e91a39a62cfbea14f0260a3e503c588c4a2bfcc8 | # short-slovak-sentiment
Created from AIOD platform | mtkinit/short_slovak_sentiment | [
"region:us"
]
| 2023-11-09T16:40:46+00:00 | {"pretty_name": "short-slovak-sentiment"} | 2023-11-09T16:41:02+00:00 | []
| []
| TAGS
#region-us
| # short-slovak-sentiment
Created from AIOD platform | [
"# short-slovak-sentiment\nCreated from AIOD platform"
]
| [
"TAGS\n#region-us \n",
"# short-slovak-sentiment\nCreated from AIOD platform"
]
| [
6,
14
]
| [
"passage: TAGS\n#region-us \n# short-slovak-sentiment\nCreated from AIOD platform"
]
|
7ec6d70e92380d1872a11c12eae328c74c9f7a71 | # another-short-slovak-dataset
Created from AIOD platform | mtkinit/another_short_slovak_dataset | [
"region:us"
]
| 2023-11-09T16:42:09+00:00 | {"pretty_name": "another-short-slovak-dataset"} | 2023-11-09T16:42:09+00:00 | []
| []
| TAGS
#region-us
| # another-short-slovak-dataset
Created from AIOD platform | [
"# another-short-slovak-dataset\nCreated from AIOD platform"
]
| [
"TAGS\n#region-us \n",
"# another-short-slovak-dataset\nCreated from AIOD platform"
]
| [
6,
16
]
| [
"passage: TAGS\n#region-us \n# another-short-slovak-dataset\nCreated from AIOD platform"
]
|
143b29a92c7e769c75f9a8fed2155cffa00663a9 | French benchmark of NLU services for employee support use case during covid-19 pandemic.
These datasets were created by the Wikit team in order to compare the performances of NLU tools on the French language.
The dataset use case is employee support during the covid 19 pandemic. The intents were defined to answer department employees' questions on the evolution of work conditions related to the crisis.
- The training_dataset.csv file contains training utterances with associated intent used to train NLU services.
- The test_dataset.csv file contains test utterances with associated intent used to test NLU services.
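
A minimal sketch of loading the two files with pandas for an intent-classification experiment; the column names used below ('text', 'intent') are assumptions, so check the actual CSV headers first.

```python
import pandas as pd

# Column names ("text", "intent") are assumptions; adjust to the actual CSV headers.
train_df = pd.read_csv("training_dataset.csv")
test_df = pd.read_csv("test_dataset.csv")

X_train, y_train = train_df["text"], train_df["intent"]
X_test, y_test = test_df["text"], test_df["intent"]

print(f"{len(train_df)} training utterances, {len(test_df)} test utterances")
print("Intents:", sorted(y_train.unique()))
```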
To use this work, please cite :
> Marion Schaeffer, Christophe Bouvard. Comparaison des solutions de NLU sur un corpus français pour un chatbot de support COVID-19. IC 2022 - Journées francophones d’Ingénierie des Connaissances, Plate-Forme Intelligence Artificielle (PFIA'22), Jun 2022, Saint-Etienne, France. pp.199-208. ⟨hal-03727958⟩
| Wikit/nlu-covid | [
"task_categories:text-classification",
"language:fr",
"license:apache-2.0",
"region:us"
]
| 2023-11-09T16:48:46+00:00 | {"language": ["fr"], "license": "apache-2.0", "task_categories": ["text-classification"]} | 2023-11-09T17:04:37+00:00 | []
| [
"fr"
]
| TAGS
#task_categories-text-classification #language-French #license-apache-2.0 #region-us
| French benchmark of NLU services for employee support use case during covid-19 pandemic.
These datasets were created by the Wikit team in order to compare the performances of NLU tools on the French language.
The dataset use case is employee support during the covid 19 pandemic. The intents were defined to answer department employees' questions on the evolution of work conditions related to the crisis.
- The training_dataset.csv file contains training utterances with associated intent used to train NLU services.
- The test_dataset.csv file contains test utterances with associated intent used to test NLU services.
To use this work, please cite :
> Marion Schaeffer, Christophe Bouvard. Comparaison des solutions de NLU sur un corpus français pour un chatbot de support COVID-19. IC 2022 - Journées francophones d’Ingénierie des Connaissances, Plate-Forme Intelligence Artificielle (PFIA'22), Jun 2022, Saint-Etienne, France. pp.199-208. ⟨hal-03727958⟩
| []
| [
"TAGS\n#task_categories-text-classification #language-French #license-apache-2.0 #region-us \n"
]
| [
31
]
| [
"passage: TAGS\n#task_categories-text-classification #language-French #license-apache-2.0 #region-us \n"
]
|
1a867c5679f797524705e5e8ae3c31036f0817be | # Dataset Card for "midascontrol"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | dputilov/midascontrol | [
"region:us"
]
| 2023-11-09T16:54:37+00:00 | {"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "conditioning_image", "dtype": "image"}, {"name": "mask", "dtype": "image"}, {"name": "text", "dtype": "string"}, {"name": "product", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 19218955898.595, "num_examples": 4945}], "download_size": 16207060690, "dataset_size": 19218955898.595}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-09T17:32:08+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "midascontrol"
More Information needed | [
"# Dataset Card for \"midascontrol\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"midascontrol\"\n\nMore Information needed"
]
| [
6,
13
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"midascontrol\"\n\nMore Information needed"
]
|
4cc91aaa0d102eb7371813eac6101df78cbb5524 | # Dataset Card for "rahul-gandhi-captions"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | fw1zr/rahul-gandhi-captions | [
"region:us"
]
| 2023-11-09T16:58:34+00:00 | {"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4491114.0, "num_examples": 116}], "download_size": 4452636, "dataset_size": 4491114.0}} | 2023-11-09T16:58:53+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "rahul-gandhi-captions"
More Information needed | [
"# Dataset Card for \"rahul-gandhi-captions\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"rahul-gandhi-captions\"\n\nMore Information needed"
]
| [
6,
18
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"rahul-gandhi-captions\"\n\nMore Information needed"
]
|
fef2e6d750eb86f2dae61881811750093741aa55 | # Dataset Card for "bw_spec_cls_80_32"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | arieg/bw_spec_cls_80_32 | [
"region:us"
]
| 2023-11-09T17:06:24+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "72786", "1": "72787", "2": "72788", "3": "72789", "4": "72790", "5": "72926", "6": "72927", "7": "72928", "8": "72930", "9": "73099", "10": "73100", "11": "73123", "12": "73124", "13": "73125", "14": "73169", "15": "73170", "16": "73171", "17": "73172", "18": "73174", "19": "73175", "20": "73192", "21": "73193", "22": "73306", "23": "73309", "24": "73318", "25": "73335", "26": "73340", "27": "73341", "28": "73342", "29": "73343", "30": "73344", "31": "73363", "32": "73365", "33": "73366", "34": "73367", "35": "73368", "36": "73369", "37": "73370", "38": "73371", "39": "73372", "40": "73465", "41": "73466", "42": "73467", "43": "73468", "44": "73469", "45": "73486", "46": "73495", "47": "73550", "48": "73551", "49": "73566", "50": "73568", "51": "73572", "52": "73573", "53": "73580", "54": "73584", "55": "73585", "56": "73587", "57": "73658", "58": "73675", "59": "73760", "60": "73761", "61": "73762", "62": "73764", "63": "73765", "64": "73766", "65": "73767", "66": "73768", "67": "73769", "68": "73770", "69": "73771", "70": "73772", "71": "73774", "72": "73778", "73": "73792", "74": "73797", "75": "73819", "76": "73820", "77": "73821", "78": "73822", "79": "73921"}}}}], "splits": [{"name": "train", "num_bytes": 85147582.4, "num_examples": 1600}, {"name": "test", "num_bytes": 21417107.0, "num_examples": 400}], "download_size": 107224330, "dataset_size": 106564689.4}} | 2023-11-09T17:06:39+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "bw_spec_cls_80_32"
More Information needed | [
"# Dataset Card for \"bw_spec_cls_80_32\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"bw_spec_cls_80_32\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"bw_spec_cls_80_32\"\n\nMore Information needed"
]
|
06c10450c0ab5ab162116e0f46214d38427b8338 | # Dataset Card for "PDEBench-1D-full"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | erbacher/PDEBench-1D-full | [
"region:us"
]
| 2023-11-09T17:11:51+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "parameters", "dtype": "string"}, {"name": "tensor", "sequence": {"sequence": {"sequence": "float32"}}}, {"name": "id", "dtype": "int64"}, {"name": "cls", "dtype": {"class_label": {"names": {"0": "1D_Advection_Sols_beta0.1.hdf5", "1": "1D_Advection_Sols_beta0.2.hdf5", "2": "1D_Advection_Sols_beta0.4.hdf5", "3": "1D_Advection_Sols_beta0.7.hdf5", "4": "1D_Advection_Sols_beta1.0.hdf5", "5": "1D_Advection_Sols_beta2.0.hdf5", "6": "1D_Advection_Sols_beta4.0.hdf5", "7": "1D_Advection_Sols_beta7.0.hdf5", "8": "1D_Burgers_Sols_Nu0.001.hdf5", "9": "1D_Burgers_Sols_Nu0.002.hdf5", "10": "1D_Burgers_Sols_Nu0.004.hdf5", "11": "1D_Burgers_Sols_Nu0.01.hdf5", "12": "1D_Burgers_Sols_Nu0.02.hdf5", "13": "1D_Burgers_Sols_Nu0.04.hdf5", "14": "1D_Burgers_Sols_Nu0.1.hdf5", "15": "1D_Burgers_Sols_Nu0.2.hdf5", "16": "1D_Burgers_Sols_Nu0.4.hdf5", "17": "1D_Burgers_Sols_Nu1.0.hdf5", "18": "1D_Burgers_Sols_Nu2.0.hdf5", "19": "1D_Burgers_Sols_Nu4.0.hdf5", "20": "1D_CFD_Rand_Eta0.01_Zeta0.01_periodic_Train.hdf5-density", "21": "1D_CFD_Rand_Eta0.01_Zeta0.01_periodic_Train.hdf5-pressure", "22": "1D_CFD_Rand_Eta0.01_Zeta0.01_periodic_Train.hdf5-vx", "23": "1D_CFD_Rand_Eta0.1_Zeta0.1_periodic_Train.hdf5-density", "24": "1D_CFD_Rand_Eta0.1_Zeta0.1_periodic_Train.hdf5-pressure", "25": "1D_CFD_Rand_Eta0.1_Zeta0.1_periodic_Train.hdf5-vx", "26": "1D_CFD_Rand_Eta1.e-8_Zeta1.e-8_periodic_Train.hdf5-density", "27": "1D_CFD_Rand_Eta1.e-8_Zeta1.e-8_periodic_Train.hdf5-pressure", "28": "1D_CFD_Rand_Eta1.e-8_Zeta1.e-8_periodic_Train.hdf5-vx", "29": "1D_CFD_Rand_Eta1.e-8_Zeta1.e-8_trans_Train.hdf5-density", "30": "1D_CFD_Rand_Eta1.e-8_Zeta1.e-8_trans_Train.hdf5-pressure", "31": "1D_CFD_Rand_Eta1.e-8_Zeta1.e-8_trans_Train.hdf5-vx", "32": "ReacDiff_Nu0.5_Rho1.0.hdf5", "33": "ReacDiff_Nu0.5_Rho10.0.hdf5", "34": "ReacDiff_Nu0.5_Rho2.0.hdf5", "35": "ReacDiff_Nu0.5_Rho5.0.hdf5", "36": "ReacDiff_Nu1.0_Rho1.0.hdf5", "37": "ReacDiff_Nu1.0_Rho10.0.hdf5", "38": "ReacDiff_Nu1.0_Rho2.0.hdf5", "39": "ReacDiff_Nu1.0_Rho5.0.hdf5", "40": "ReacDiff_Nu2.0_Rho1.0.hdf5", "41": "ReacDiff_Nu2.0_Rho10.0.hdf5", "42": "ReacDiff_Nu2.0_Rho2.0.hdf5", "43": "ReacDiff_Nu2.0_Rho5.0.hdf5", "44": "ReacDiff_Nu5.0_Rho1.0.hdf5", "45": "ReacDiff_Nu5.0_Rho10.0.hdf5", "46": "ReacDiff_Nu5.0_Rho2.0.hdf5", "47": "ReacDiff_Nu5.0_Rho5.0.hdf5"}}}}], "splits": [{"name": "train", "num_bytes": 99806420000, "num_examples": 480000}], "download_size": 2640711523, "dataset_size": 99806420000}} | 2023-11-09T21:03:53+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "PDEBench-1D-full"
More Information needed | [
"# Dataset Card for \"PDEBench-1D-full\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"PDEBench-1D-full\"\n\nMore Information needed"
]
| [
6,
18
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"PDEBench-1D-full\"\n\nMore Information needed"
]
|
e34ab5f49ce734cab780a3a7fcdf2eb4bf4ad7ba |
# Description
Jasper Ridge is a popular hyperspectral dataset used in [[enviTutorials](http://www.cossa.csiro.au/hswww/Overview.htm), [SS-NMF](http://arxiv.org/abs/1403.4682), [DgS-NMF](http://www.sciencedirect.com/science/article/pii/S0924271613002761), [RRLbS](http://arxiv.org/abs/1409.0685), [L1-CENMF](http://arxiv.org/abs/1305.7311)]. The full image is 512 x 614 pixels, and each pixel is recorded in 224 channels ranging from 380 nm to 2500 nm, with a spectral resolution of up to 9.46 nm. Since ground truth is too complex to obtain for the whole image, we consider a subimage of 100 x 100 pixels whose first pixel is the (105,269)-th pixel of the original image. After removing channels 1--3, 108--112, 154--166 and 220--224 (due to dense water vapor and atmospheric effects), 198 channels are left (a common preprocessing step for hyperspectral unmixing analyses). There are four endmembers latent in this data: "#1 Road", "#2 Soil", "#3 Water" and "#4 Tree".
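For illustration, the preprocessing described above (dropping the water-absorption channels and cropping the 100 x 100 subimage) can be sketched in a few lines of NumPy. The array layout and file name are our assumptions, not part of the original release; adapt them to however you load the cube.
```
import numpy as np

# Assumed starting point: the full Jasper Ridge cube as a (512, 614, 224) array.
cube = np.load("jasper_ridge_full.npy")  # hypothetical file name

# 1-based channel ranges removed due to dense water vapor / atmospheric effects
bad = np.r_[1:4, 108:113, 154:167, 220:225] - 1   # converted to 0-based indices
keep = np.setdiff1d(np.arange(cube.shape[2]), bad)
cube = cube[:, :, keep]                            # 198 channels remain

# 100 x 100 subimage whose first pixel is the (105, 269)-th pixel of the original
sub = cube[104:204, 268:368, :]
print(sub.shape)  # (100, 100, 198)
```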
# Quick look
<figure>
<img src= "assets/1908991_orig.jpg" alt="Jasper Ridge" width="500" />
<figcaption>Jasper Ridge and its ground truth (GT:abundances and GT:endmembers).</figcaption>
</figure>
# Credits
Dataset originally collected by Feiyun Zhu and available at: http://www.escience.cn/people/feiyunZHU/Dataset_GT.html
To use this dataset, cite the associated paper:
```
@misc{zhu2017hyperspectral,
title={Hyperspectral Unmixing: Ground Truth Labeling, Datasets, Benchmark Performances and Survey},
author={Feiyun Zhu},
year={2017},
eprint={1708.05125},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
``` | danaroth/jasper_ridge | [
"license:unknown",
"arxiv:1403.4682",
"arxiv:1409.0685",
"arxiv:1305.7311",
"arxiv:1708.05125",
"region:us"
]
| 2023-11-09T17:29:18+00:00 | {"license": "unknown"} | 2023-11-10T08:22:05+00:00 | [
"1403.4682",
"1409.0685",
"1305.7311",
"1708.05125"
]
| []
| TAGS
#license-unknown #arxiv-1403.4682 #arxiv-1409.0685 #arxiv-1305.7311 #arxiv-1708.05125 #region-us
|
# Description
Jasper Ridge is a popular hyperspectral data used in [enviTutorials, SS-NMF, DgS-NMF, RRLbS, L1-CENMF]. There are 512 x 614 pixels in it. Each pixel is recorded at 224 channels ranging from 380 nm to 2500 nm. The spectral resolution is up to 9.46nm. Since this hyperspectral image is too complex to get the ground truth, we consider a subimage of 100 x 100 pixels. The first pixel starts from the (105,269)-th pixel in the original image. After removing the channels 1--3, 108--112, 154--166 and 220--224 (due to dense water vapor and atmospheric effects), 198 channels are left (this is a common preprocess for HU analyses). There are four endmembers latent in this data: "#1 Road", "#2 Soil", "#3 Water" and "#4 Tree".
# Quick look
<figure>
<img src= "assets/1908991_orig.jpg" alt="Jasper Ridge" width="500" />
<figcaption>Jasper Ridge and its ground truth (GT:abundances and GT:endmembers).</figcaption>
</figure>
# Credits
Dataset originally collected by Feiyun Zhu and originally available at: URL
To use this dataset, cite the associated paper:
| [
"# Description\n\nJasper Ridge is a popular hyperspectral data used in [enviTutorials, SS-NMF, DgS-NMF, RRLbS, L1-CENMF]. There are 512 x 614 pixels in it. Each pixel is recorded at 224 channels ranging from 380 nm to 2500 nm. The spectral resolution is up to 9.46nm. Since this hyperspectral image is too complex to get the ground truth, we consider a subimage of 100 x 100 pixels. The first pixel starts from the (105,269)-th pixel in the original image. After removing the channels 1--3, 108--112, 154--166 and 220--224 (due to dense water vapor and atmospheric effects), 198 channels are left (this is a common preprocess for HU analyses). There are four endmembers latent in this data: \"#1 Road\", \"#2 Soil\", \"#3 Water\" and \"#4 Tree\".",
"# Quick look\n\n<figure>\n <img src= \"assets/1908991_orig.jpg\" alt=\"Jasper Ridge\" width=\"500\" />\n <figcaption>Jasper Ridge and its ground truth (GT:abundances and GT:endmembers).</figcaption>\n</figure>",
"# Credits\n\nDataset originally collected by Feiyun Zhu and originally available at: URL\n\nTo use this dataset, cite the associated paper:"
]
| [
"TAGS\n#license-unknown #arxiv-1403.4682 #arxiv-1409.0685 #arxiv-1305.7311 #arxiv-1708.05125 #region-us \n",
"# Description\n\nJasper Ridge is a popular hyperspectral data used in [enviTutorials, SS-NMF, DgS-NMF, RRLbS, L1-CENMF]. There are 512 x 614 pixels in it. Each pixel is recorded at 224 channels ranging from 380 nm to 2500 nm. The spectral resolution is up to 9.46nm. Since this hyperspectral image is too complex to get the ground truth, we consider a subimage of 100 x 100 pixels. The first pixel starts from the (105,269)-th pixel in the original image. After removing the channels 1--3, 108--112, 154--166 and 220--224 (due to dense water vapor and atmospheric effects), 198 channels are left (this is a common preprocess for HU analyses). There are four endmembers latent in this data: \"#1 Road\", \"#2 Soil\", \"#3 Water\" and \"#4 Tree\".",
"# Quick look\n\n<figure>\n <img src= \"assets/1908991_orig.jpg\" alt=\"Jasper Ridge\" width=\"500\" />\n <figcaption>Jasper Ridge and its ground truth (GT:abundances and GT:endmembers).</figcaption>\n</figure>",
"# Credits\n\nDataset originally collected by Feiyun Zhu and originally available at: URL\n\nTo use this dataset, cite the associated paper:"
]
| [
46,
225,
76,
33
]
| [
"passage: TAGS\n#license-unknown #arxiv-1403.4682 #arxiv-1409.0685 #arxiv-1305.7311 #arxiv-1708.05125 #region-us \n# Description\n\nJasper Ridge is a popular hyperspectral data used in [enviTutorials, SS-NMF, DgS-NMF, RRLbS, L1-CENMF]. There are 512 x 614 pixels in it. Each pixel is recorded at 224 channels ranging from 380 nm to 2500 nm. The spectral resolution is up to 9.46nm. Since this hyperspectral image is too complex to get the ground truth, we consider a subimage of 100 x 100 pixels. The first pixel starts from the (105,269)-th pixel in the original image. After removing the channels 1--3, 108--112, 154--166 and 220--224 (due to dense water vapor and atmospheric effects), 198 channels are left (this is a common preprocess for HU analyses). There are four endmembers latent in this data: \"#1 Road\", \"#2 Soil\", \"#3 Water\" and \"#4 Tree\".# Quick look\n\n<figure>\n <img src= \"assets/1908991_orig.jpg\" alt=\"Jasper Ridge\" width=\"500\" />\n <figcaption>Jasper Ridge and its ground truth (GT:abundances and GT:endmembers).</figcaption>\n</figure># Credits\n\nDataset originally collected by Feiyun Zhu and originally available at: URL\n\nTo use this dataset, cite the associated paper:"
]
|
ed55454e16e2e8e228f809b75696b8da0b9c82d5 | # Dataset Card for "bw_spec_cls_80_33"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | arieg/bw_spec_cls_80_33 | [
"region:us"
]
| 2023-11-09T17:32:21+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "74302", "1": "74347", "2": "74348", "3": "74362", "4": "74365", "5": "74370", "6": "74371", "7": "74372", "8": "74373", "9": "74375", "10": "74377", "11": "74378", "12": "74380", "13": "74381", "14": "74382", "15": "74383", "16": "74384", "17": "74385", "18": "74386", "19": "74387", "20": "74388", "21": "74390", "22": "74392", "23": "74421", "24": "74445", "25": "74546", "26": "74669", "27": "74671", "28": "74706", "29": "74908", "30": "74942", "31": "74954", "32": "74955", "33": "74959", "34": "74960", "35": "75194", "36": "75221", "37": "75230", "38": "75304", "39": "75310", "40": "75314", "41": "75317", "42": "75429", "43": "75430", "44": "75431", "45": "75432", "46": "75433", "47": "75434", "48": "75435", "49": "75436", "50": "75437", "51": "75438", "52": "75439", "53": "75440", "54": "75441", "55": "75442", "56": "75443", "57": "75607", "58": "75612", "59": "75692", "60": "75762", "61": "75763", "62": "75764", "63": "75782", "64": "75783", "65": "75784", "66": "75785", "67": "75786", "68": "75787", "69": "75788", "70": "75844", "71": "75862", "72": "75866", "73": "75869", "74": "75883", "75": "75903", "76": "75908", "77": "75925", "78": "75926", "79": "75927"}}}}], "splits": [{"name": "train", "num_bytes": 88794100.8, "num_examples": 1600}, {"name": "test", "num_bytes": 22341388.0, "num_examples": 400}], "download_size": 111396696, "dataset_size": 111135488.8}} | 2023-11-09T17:32:41+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "bw_spec_cls_80_33"
More Information needed | [
"# Dataset Card for \"bw_spec_cls_80_33\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"bw_spec_cls_80_33\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"bw_spec_cls_80_33\"\n\nMore Information needed"
]
|
28cf37b59289d747819da31a56f9c05f1bcdcb6b | # Dataset Card for "bw_spec_cls_80_34"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | arieg/bw_spec_cls_80_34 | [
"region:us"
]
| 2023-11-09T17:58:22+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "75928", "1": "75929", "2": "75930", "3": "75931", "4": "75932", "5": "75933", "6": "75935", "7": "75936", "8": "75937", "9": "75975", "10": "76036", "11": "76069", "12": "76071", "13": "76072", "14": "76073", "15": "76074", "16": "76075", "17": "76076", "18": "76077", "19": "76078", "20": "76079", "21": "76121", "22": "76375", "23": "76381", "24": "76437", "25": "76440", "26": "76654", "27": "76659", "28": "77517", "29": "77519", "30": "77521", "31": "77522", "32": "77523", "33": "77564", "34": "77571", "35": "77572", "36": "77952", "37": "78038", "38": "78156", "39": "78213", "40": "78516", "41": "78833", "42": "78834", "43": "78839", "44": "78841", "45": "78843", "46": "78845", "47": "78847", "48": "78848", "49": "78849", "50": "78850", "51": "78851", "52": "78852", "53": "78984", "54": "78998", "55": "79087", "56": "79575", "57": "79593", "58": "79605", "59": "79606", "60": "79610", "61": "79616", "62": "79741", "63": "79973", "64": "79975", "65": "79977", "66": "79978", "67": "79985", "68": "79986", "69": "79988", "70": "79990", "71": "79995", "72": "80035", "73": "80293", "74": "80341", "75": "80351", "76": "80389", "77": "80402", "78": "80515", "79": "80516"}}}}], "splits": [{"name": "train", "num_bytes": 88501139.2, "num_examples": 1600}, {"name": "test", "num_bytes": 21775350.0, "num_examples": 400}], "download_size": 109195616, "dataset_size": 110276489.2}} | 2023-11-09T17:58:39+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "bw_spec_cls_80_34"
More Information needed | [
"# Dataset Card for \"bw_spec_cls_80_34\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"bw_spec_cls_80_34\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"bw_spec_cls_80_34\"\n\nMore Information needed"
]
|
b52d6ca62847600282426231f98a1cfdfdef5c7b | # Dataset Card for "JimmyLuAugConsistent"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | bigheiniuJ/JimmyLuAugConsistent | [
"region:us"
]
| 2023-11-09T18:04:33+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "task", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "aug_type", "dtype": "string"}, {"name": "aug_time", "dtype": "int64"}, {"name": "output", "dtype": "string"}, {"name": "options", "sequence": "string"}, {"name": "seed", "dtype": "string"}, {"name": "split", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2719565, "num_examples": 9450}], "download_size": 1104965, "dataset_size": 2719565}} | 2023-11-09T18:04:35+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "JimmyLuAugConsistent"
More Information needed | [
"# Dataset Card for \"JimmyLuAugConsistent\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"JimmyLuAugConsistent\"\n\nMore Information needed"
]
| [
6,
19
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"JimmyLuAugConsistent\"\n\nMore Information needed"
]
|
bb8c32cf77490f67f0357ba37d362e1abde74245 | # Dataset Card for "mind_prompts"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | jiuyuan/mind_prompts | [
"region:us"
]
| 2023-11-09T18:15:56+00:00 | {"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 106750384, "num_examples": 48976}], "download_size": 38303071, "dataset_size": 106750384}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-09T18:16:00+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "mind_prompts"
More Information needed | [
"# Dataset Card for \"mind_prompts\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"mind_prompts\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"mind_prompts\"\n\nMore Information needed"
]
|
99424a65f270c025ae9104eef03bff5d9e8bf5c3 | # Dataset Card for "bw_spec_cls_80_35"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | arieg/bw_spec_cls_80_35 | [
"region:us"
]
| 2023-11-09T18:24:44+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "80517", "1": "80518", "2": "80519", "3": "80520", "4": "80693", "5": "80694", "6": "80695", "7": "80696", "8": "80697", "9": "80751", "10": "80753", "11": "80754", "12": "80755", "13": "80756", "14": "80758", "15": "80765", "16": "80766", "17": "80772", "18": "80773", "19": "80774", "20": "80775", "21": "80776", "22": "80793", "23": "80833", "24": "80834", "25": "80835", "26": "80836", "27": "81033", "28": "81037", "29": "81082", "30": "81083", "31": "81084", "32": "81085", "33": "81189", "34": "81193", "35": "81194", "36": "81195", "37": "81362", "38": "81365", "39": "81436", "40": "81457", "41": "81485", "42": "81491", "43": "81512", "44": "81523", "45": "81543", "46": "81554", "47": "81555", "48": "81565", "49": "81576", "50": "81586", "51": "81600", "52": "81612", "53": "81613", "54": "81623", "55": "81638", "56": "81650", "57": "81660", "58": "81781", "59": "81782", "60": "81792", "61": "81802", "62": "81803", "63": "81814", "64": "81868", "65": "81938", "66": "81945", "67": "81946", "68": "81988", "69": "81999", "70": "82157", "71": "82231", "72": "82237", "73": "82242", "74": "82250", "75": "82410", "76": "82505", "77": "82507", "78": "82628", "79": "82629"}}}}], "splits": [{"name": "train", "num_bytes": 90214491.2, "num_examples": 1600}, {"name": "test", "num_bytes": 22067286.0, "num_examples": 400}], "download_size": 110421965, "dataset_size": 112281777.2}} | 2023-11-09T18:25:04+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "bw_spec_cls_80_35"
More Information needed | [
"# Dataset Card for \"bw_spec_cls_80_35\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"bw_spec_cls_80_35\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"bw_spec_cls_80_35\"\n\nMore Information needed"
]
|
178715f2b2c915e1ff7603d743b778dc32fc42e3 | ## PwC4KPG dataset
Due to strict copyright restrictions, the dataset is available for non-commercial research use ONLY.
Currently it requires manual approval for access. Please send an email to [email protected], stating (1) your Hugging Face account name; (2) your institute/company name; and (3) the purpose of using this dataset.
## PwC4KPG dataset
We extract the **fields, tasks, methods, datasets, metrics, titles and abstracts** from the raw PwC corpus, provided that a paper has a full title and abstract.
A total of 6,012 papers were extracted, of which 2,119 included all five categories of “keyphrases”, and the remaining 3,839 contained only some of them.
Note that PwC does not contain the research fields as we define them, so we used the “main_collection” of methods as an alternative.
**Train: 5,012 / Dev: 500 / Test: 500**
We randomly select 1,000 papers with full information, half of which are used for testing and the other half for validation. The remaining 5,012 serve as the training set.
**Paper: JASIST 2023, Generating keyphrases for readers: A controllable keyphrase generation framework.**

```
@article{Jiang2023PwC4KPG,
    title={Generating keyphrases for readers: A controllable keyphrase generation framework},
    author={Jiang, Yi and Meng, Rui and Huang, Yong and Lu, Wei and Liu, Jiawei},
    journal={Journal of the Association for Information Science and Technology},
    year={2023},
    volume={74},
    number={7},
    pages={759--774},
}
``` | JoyeJiang/PwC4KPG | [
"task_categories:text-generation",
"size_categories:1K<n<10K",
"language:en",
"region:us"
]
| 2023-11-09T18:40:40+00:00 | {"language": ["en"], "size_categories": ["1K<n<10K"], "task_categories": ["text-generation"]} | 2023-11-09T19:27:08+00:00 | []
| [
"en"
]
| TAGS
#task_categories-text-generation #size_categories-1K<n<10K #language-English #region-us
| ## PwC4KPG dataset
Due to the strict copyright restriction, the dataset is only available for non-commercial research use ONLY.
Currently it requires manual approval for access. Please send an email to yijiang@URL, stating (1) Huggingface account name; (2) institute/company name; (3) the purpose of using this dataset.
## PwC4KPG dataset
we extract the fields, tasks, methods, datasets, metrics, titles and abstracts from the raw corpus of PwC, provided that the paper has a full title and abstract.
A total of 6,012 papers were extracted, of which 2,119 included all five categories of “keyphrases”, and the remaining 3,839 contained only some of them.
Note that PwC does not contain the research fields as we define them, so we used the “main_collection” of methods as an alternative.
Train: 5,012 / Dev: 500 / Test: 500
We randomly select 1,000 papers with full information,half of which are used for testing and the other half for validation. The remaining 5,012 served as the training set.
Paper: JASIST 2023, Generating keyphrases for readers: A controllable keyphrase generation framework.
!image/png
| [
"## PwC4KPG dataset\n\nDue to the strict copyright restriction, the dataset is only available for non-commercial research use ONLY.\n\nCurrently it requires manual approval for access. Please send an email to yijiang@URL, stating (1) Huggingface account name; (2) institute/company name; (3) the purpose of using this dataset.",
"## PwC4KPG dataset\n\nwe extract the fields, tasks, methods, datasets, metrics, titles and abstracts from the raw corpus of PwC, provided that the paper has a full title and abstract.\nA total of 6,012 papers were extracted, of which 2,119 included all five categories of “keyphrases”, and the remaining 3,839 contained only some of them.\nNote that PwC does not contain the research fields as we define them, so we used the “main_collection” of methods as an alternative.\n\nTrain: 5,012 / Dev: 500 / Test: 500 \nWe randomly select 1,000 papers with full information,half of which are used for testing and the other half for validation. The remaining 5,012 served as the training set.\n\nPaper: JASIST 2023, Generating keyphrases for readers: A controllable keyphrase generation framework.\n\n!image/png"
]
| [
"TAGS\n#task_categories-text-generation #size_categories-1K<n<10K #language-English #region-us \n",
"## PwC4KPG dataset\n\nDue to the strict copyright restriction, the dataset is only available for non-commercial research use ONLY.\n\nCurrently it requires manual approval for access. Please send an email to yijiang@URL, stating (1) Huggingface account name; (2) institute/company name; (3) the purpose of using this dataset.",
"## PwC4KPG dataset\n\nwe extract the fields, tasks, methods, datasets, metrics, titles and abstracts from the raw corpus of PwC, provided that the paper has a full title and abstract.\nA total of 6,012 papers were extracted, of which 2,119 included all five categories of “keyphrases”, and the remaining 3,839 contained only some of them.\nNote that PwC does not contain the research fields as we define them, so we used the “main_collection” of methods as an alternative.\n\nTrain: 5,012 / Dev: 500 / Test: 500 \nWe randomly select 1,000 papers with full information,half of which are used for testing and the other half for validation. The remaining 5,012 served as the training set.\n\nPaper: JASIST 2023, Generating keyphrases for readers: A controllable keyphrase generation framework.\n\n!image/png"
]
| [
33,
80,
207
]
| [
"passage: TAGS\n#task_categories-text-generation #size_categories-1K<n<10K #language-English #region-us \n## PwC4KPG dataset\n\nDue to the strict copyright restriction, the dataset is only available for non-commercial research use ONLY.\n\nCurrently it requires manual approval for access. Please send an email to yijiang@URL, stating (1) Huggingface account name; (2) institute/company name; (3) the purpose of using this dataset.## PwC4KPG dataset\n\nwe extract the fields, tasks, methods, datasets, metrics, titles and abstracts from the raw corpus of PwC, provided that the paper has a full title and abstract.\nA total of 6,012 papers were extracted, of which 2,119 included all five categories of “keyphrases”, and the remaining 3,839 contained only some of them.\nNote that PwC does not contain the research fields as we define them, so we used the “main_collection” of methods as an alternative.\n\nTrain: 5,012 / Dev: 500 / Test: 500 \nWe randomly select 1,000 papers with full information,half of which are used for testing and the other half for validation. The remaining 5,012 served as the training set.\n\nPaper: JASIST 2023, Generating keyphrases for readers: A controllable keyphrase generation framework.\n\n!image/png"
]
|
9d33070f19a00cc8ac14198635cf9f1a10e83dae | # Dataset Card for "bw_spec_cls_80_36"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | arieg/bw_spec_cls_80_36 | [
"region:us"
]
| 2023-11-09T18:51:14+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "82630", "1": "82631", "2": "82881", "3": "82886", "4": "82890", "5": "82892", "6": "82893", "7": "82914", "8": "82915", "9": "82916", "10": "82917", "11": "82918", "12": "82919", "13": "82920", "14": "82921", "15": "82928", "16": "82929", "17": "82930", "18": "82931", "19": "82932", "20": "83600", "21": "83612", "22": "83613", "23": "83715", "24": "83717", "25": "83718", "26": "83719", "27": "83789", "28": "83790", "29": "83791", "30": "83903", "31": "83911", "32": "83913", "33": "83954", "34": "83960", "35": "83969", "36": "84009", "37": "84055", "38": "84056", "39": "84058", "40": "84095", "41": "84096", "42": "84097", "43": "84111", "44": "84135", "45": "84136", "46": "84139", "47": "84141", "48": "84142", "49": "84144", "50": "84154", "51": "84155", "52": "84156", "53": "84157", "54": "84158", "55": "84159", "56": "84195", "57": "84198", "58": "84200", "59": "84201", "60": "84202", "61": "84264", "62": "84290", "63": "84291", "64": "84405", "65": "84417", "66": "84423", "67": "84483", "68": "84484", "69": "84485", "70": "84486", "71": "84605", "72": "84743", "73": "84757", "74": "84768", "75": "84788", "76": "84817", "77": "85027", "78": "85038", "79": "85039"}}}}], "splits": [{"name": "train", "num_bytes": 86231214.4, "num_examples": 1600}, {"name": "test", "num_bytes": 21669535.0, "num_examples": 400}], "download_size": 107649160, "dataset_size": 107900749.4}} | 2023-11-09T18:51:31+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "bw_spec_cls_80_36"
More Information needed | [
"# Dataset Card for \"bw_spec_cls_80_36\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"bw_spec_cls_80_36\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"bw_spec_cls_80_36\"\n\nMore Information needed"
]
|
a6424c8006ecf0b86563c3525cc0003227651c46 |
# Dataset Card for Dataset Name
<!-- Provide a quick summary of the dataset. -->
This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1).
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] | papanton/antonios | [
"region:us"
]
| 2023-11-09T19:00:33+00:00 | {} | 2023-11-09T19:05:09+00:00 | []
| []
| TAGS
#region-us
|
# Dataset Card for Dataset Name
This dataset card aims to be a base template for new datasets. It has been generated using this raw template.
## Dataset Details
### Dataset Description
- Curated by:
- Funded by [optional]:
- Shared by [optional]:
- Language(s) (NLP):
- License:
### Dataset Sources [optional]
- Repository:
- Paper [optional]:
- Demo [optional]:
## Uses
### Direct Use
### Out-of-Scope Use
## Dataset Structure
## Dataset Creation
### Curation Rationale
### Source Data
#### Data Collection and Processing
#### Who are the source data producers?
### Annotations [optional]
#### Annotation process
#### Who are the annotators?
#### Personal and Sensitive Information
## Bias, Risks, and Limitations
### Recommendations
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
[optional]
BibTeX:
APA:
## Glossary [optional]
## More Information [optional]
## Dataset Card Authors [optional]
## Dataset Card Contact
| [
"# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.",
"## Dataset Details",
"### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:",
"### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:",
"## Uses",
"### Direct Use",
"### Out-of-Scope Use",
"## Dataset Structure",
"## Dataset Creation",
"### Curation Rationale",
"### Source Data",
"#### Data Collection and Processing",
"#### Who are the source data producers?",
"### Annotations [optional]",
"#### Annotation process",
"#### Who are the annotators?",
"#### Personal and Sensitive Information",
"## Bias, Risks, and Limitations",
"### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:",
"## Glossary [optional]",
"## More Information [optional]",
"## Dataset Card Authors [optional]",
"## Dataset Card Contact"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.",
"## Dataset Details",
"### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:",
"### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:",
"## Uses",
"### Direct Use",
"### Out-of-Scope Use",
"## Dataset Structure",
"## Dataset Creation",
"### Curation Rationale",
"### Source Data",
"#### Data Collection and Processing",
"#### Who are the source data producers?",
"### Annotations [optional]",
"#### Annotation process",
"#### Who are the annotators?",
"#### Personal and Sensitive Information",
"## Bias, Risks, and Limitations",
"### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:",
"## Glossary [optional]",
"## More Information [optional]",
"## Dataset Card Authors [optional]",
"## Dataset Card Contact"
]
| [
6,
34,
4,
40,
29,
3,
4,
9,
6,
5,
7,
4,
7,
10,
9,
5,
9,
8,
10,
46,
8,
7,
10,
5
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact"
]
|
e0667e29148edea91df8e91e9331d99f6711a47a | asfdf | ngcgarcia/foo | [
"region:us"
]
| 2023-11-09T19:04:39+00:00 | {} | 2023-11-09T19:04:48+00:00 | []
| []
| TAGS
#region-us
| asfdf | []
| [
"TAGS\n#region-us \n"
]
| [
6
]
| [
"passage: TAGS\n#region-us \n"
]
|
b088f1a3d5f9516c05dec319c46431bcb3023819 | # Dataset Card for "AudioMNIST"
The [audioMNIST](https://github.com/soerenab/AudioMNIST) dataset contains 50 English recordings of each digit (0-9) from each of 60 speakers.
There are 60 participants in total, 12 women and 48 men, covering a diverse range of accents and countries of origin. Their ages range from 22 to 61 years. This makes it a good dataset for exploring a simple audio classification problem: predicting either the digit or the speaker's gender.
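As a quick sketch of how the splits can be pulled from the Hub with the datasets library (the feature names below follow this card's dataset configuration; everything else is just an illustrative usage example):
```
from datasets import load_dataset

# Loads the train / test splits from the Hub (audio decoding requires soundfile)
ds = load_dataset("gilkeyio/AudioMNIST")

sample = ds["train"][0]
print(sample["digit"], sample["gender"], sample["speaker_id"])
print(sample["audio"]["sampling_rate"])  # 16 kHz waveforms
```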
## Bias, Risks, and Limitations
* The genders represented in the dataset are unbalanced, with around 80% being men.
* The majority of the speakers, around 70%, have a German accent
### Citation Information
The original creators of the dataset ask you to cite [their paper](https://arxiv.org/abs/1807.03418) if you use this data:
```
@ARTICLE{becker2018interpreting,
author = {Becker, S\"oren and Ackermann, Marcel and Lapuschkin, Sebastian and M\"uller, Klaus-Robert and Samek, Wojciech},
title = {Interpreting and Explaining Deep Neural Networks for Classification of Audio Signals},
journal = {CoRR},
volume = {abs/1807.03418},
year = {2018},
archivePrefix = {arXiv},
eprint = {1807.03418},
}
``` | gilkeyio/AudioMNIST | [
"task_categories:audio-classification",
"size_categories:10K<n<100K",
"language:en",
"license:mit",
"arxiv:1807.03418",
"region:us"
]
| 2023-11-09T19:04:43+00:00 | {"language": ["en"], "license": "mit", "size_categories": ["10K<n<100K"], "task_categories": ["audio-classification"], "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "speaker_id", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "digit", "dtype": {"class_label": {"names": {"0": "0", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6", "7": "7", "8": "8", "9": "9"}}}}, {"name": "gender", "dtype": {"class_label": {"names": {"0": "male", "1": "female"}}}}, {"name": "accent", "dtype": "string"}, {"name": "age", "dtype": "int64"}, {"name": "native_speaker", "dtype": "bool"}, {"name": "origin", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1493209727.0, "num_examples": 24000}, {"name": "test", "num_bytes": 360966680.0, "num_examples": 6000}], "download_size": 1483680961, "dataset_size": 1854176407.0}} | 2023-11-22T15:28:13+00:00 | [
"1807.03418"
]
| [
"en"
]
| TAGS
#task_categories-audio-classification #size_categories-10K<n<100K #language-English #license-mit #arxiv-1807.03418 #region-us
| # Dataset Card for "AudioMNIST"
The audioMNIST dataset has 50 English recordings per digit (0-9) of 60 speakers.
There are 60 participants in total, with 12 being women and 48 being men, all featuring a diverse range of accents and country of origin. Their ages vary from 22 to 61 years old. This is a great dataset to explore a simple audio classification problem: either the digit or the gender.
## Bias, Risks, and Limitations
* The genders represented in the dataset are unbalanced, with around 80% being men.
* The majority of the speakers, around 70%, have a German accent
The original creators of the dataset ask you to cite their paper if you use this data:
| [
"# Dataset Card for \"AudioMNIST\"\nThe audioMNIST dataset has 50 English recordings per digit (0-9) of 60 speakers.\nThere are 60 participants in total, with 12 being women and 48 being men, all featuring a diverse range of accents and country of origin. Their ages vary from 22 to 61 years old. This is a great dataset to explore a simple audio classification problem: either the digit or the gender.",
"## Bias, Risks, and Limitations\n* The genders represented in the dataset are unbalanced, with around 80% being men.\n* The majority of the speakers, around 70%, have a German accent\n\n\nThe original creators of the dataset ask you to cite their paper if you use this data:"
]
| [
"TAGS\n#task_categories-audio-classification #size_categories-10K<n<100K #language-English #license-mit #arxiv-1807.03418 #region-us \n",
"# Dataset Card for \"AudioMNIST\"\nThe audioMNIST dataset has 50 English recordings per digit (0-9) of 60 speakers.\nThere are 60 participants in total, with 12 being women and 48 being men, all featuring a diverse range of accents and country of origin. Their ages vary from 22 to 61 years old. This is a great dataset to explore a simple audio classification problem: either the digit or the gender.",
"## Bias, Risks, and Limitations\n* The genders represented in the dataset are unbalanced, with around 80% being men.\n* The majority of the speakers, around 70%, have a German accent\n\n\nThe original creators of the dataset ask you to cite their paper if you use this data:"
]
| [
48,
96,
66
]
| [
"passage: TAGS\n#task_categories-audio-classification #size_categories-10K<n<100K #language-English #license-mit #arxiv-1807.03418 #region-us \n# Dataset Card for \"AudioMNIST\"\nThe audioMNIST dataset has 50 English recordings per digit (0-9) of 60 speakers.\nThere are 60 participants in total, with 12 being women and 48 being men, all featuring a diverse range of accents and country of origin. Their ages vary from 22 to 61 years old. This is a great dataset to explore a simple audio classification problem: either the digit or the gender.## Bias, Risks, and Limitations\n* The genders represented in the dataset are unbalanced, with around 80% being men.\n* The majority of the speakers, around 70%, have a German accent\n\n\nThe original creators of the dataset ask you to cite their paper if you use this data:"
]
|
faf519e1440b71e351d828127c8ebb7998fe452f | # Dataset Card for "bw_spec_cls_80_37"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | arieg/bw_spec_cls_80_37 | [
"region:us"
]
| 2023-11-09T19:16:58+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "85040", "1": "85041", "2": "85290", "3": "85291", "4": "85317", "5": "85318", "6": "85343", "7": "85346", "8": "85347", "9": "85400", "10": "85419", "11": "85420", "12": "85421", "13": "85422", "14": "85423", "15": "85424", "16": "85425", "17": "85426", "18": "85427", "19": "85428", "20": "85482", "21": "85484", "22": "85485", "23": "85486", "24": "85487", "25": "85488", "26": "85489", "27": "85490", "28": "85491", "29": "85492", "30": "85494", "31": "85691", "32": "85692", "33": "85693", "34": "85787", "35": "85788", "36": "85791", "37": "85792", "38": "85816", "39": "85817", "40": "85822", "41": "85823", "42": "85828", "43": "85831", "44": "85832", "45": "85833", "46": "85834", "47": "85835", "48": "85836", "49": "85837", "50": "85838", "51": "85839", "52": "85840", "53": "85950", "54": "85951", "55": "85952", "56": "85953", "57": "85954", "58": "85955", "59": "85956", "60": "85957", "61": "85963", "62": "85966", "63": "85967", "64": "86140", "65": "86259", "66": "86417", "67": "86419", "68": "86441", "69": "86443", "70": "86481", "71": "86482", "72": "86483", "73": "86484", "74": "86485", "75": "86486", "76": "86487", "77": "86562", "78": "86576", "79": "86623"}}}}], "splits": [{"name": "train", "num_bytes": 89275462.4, "num_examples": 1600}, {"name": "test", "num_bytes": 22091651.0, "num_examples": 400}], "download_size": 110582704, "dataset_size": 111367113.4}} | 2023-11-09T19:17:18+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "bw_spec_cls_80_37"
More Information needed | [
"# Dataset Card for \"bw_spec_cls_80_37\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"bw_spec_cls_80_37\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"bw_spec_cls_80_37\"\n\nMore Information needed"
]
|
183626cafdacaf90a7d7081c6811300e2da8f794 | # Dataset Card for "temp031"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | growth-cadet/temp031 | [
"region:us"
]
| 2023-11-09T19:26:13+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "round_name", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "sequence", "dtype": "string"}, {"name": "labels", "sequence": "string"}, {"name": "scores", "sequence": "float64"}], "splits": [{"name": "train", "num_bytes": 1296454, "num_examples": 4098}], "download_size": 103556, "dataset_size": 1296454}} | 2023-11-09T19:26:15+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "temp031"
More Information needed | [
"# Dataset Card for \"temp031\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"temp031\"\n\nMore Information needed"
]
| [
6,
13
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"temp031\"\n\nMore Information needed"
]
|
ebb76a93ed171db4f37d13e5c8312e0a3fcdbedb |
<div align="center">
<h1 align="center">🫁 LyNoS 🤗</h1>
<h3 align="center">A multilabel lymph node segmentation dataset from contrast CT</h3>
**LyNoS** was developed by SINTEF Medical Image Analysis to accelerate medical AI research.
</div>
## [Brief intro](https://github.com/raidionics/LyNoS#brief-intro)
This repository contains the LyNoS dataset described in ["_Mediastinal lymph nodes segmentation using 3D convolutional neural network ensembles and anatomical priors guiding_"](https://doi.org/10.1080/21681163.2022.2043778).
The dataset has now also been uploaded to Zenodo and the Hugging Face Hub enabling users to more easily access the data through Python API.
We have also developed a web demo to enable others to easily test the pretrained model presented in the paper. The application was developed using [Gradio](https://www.gradio.app) for the frontend and the segmentation is performed using the [Raidionics](https://raidionics.github.io/) backend.
## [Dataset](https://github.com/raidionics/LyNoS#data) <a href="https://colab.research.google.com/gist/andreped/274bf953771059fd9537877404369bed/lynos-load-dataset-example.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
### [Accessing dataset](https://github.com/raidionics/LyNoS#accessing-dataset)
The dataset contains 15 CTs with corresponding lymph nodes, azygos, esophagus, and subclavian carotid arteries. The folder structure is described below.
The easiest way to access the data is through Python with Hugging Face's [datasets](https://pypi.org/project/datasets/) package:
```
from datasets import load_dataset
# downloads data from Zenodo through the Hugging Face hub
# - might take several minutes (~5 minutes in CoLab)
dataset = load_dataset("andreped/LyNoS")
print(dataset)
# list paths of all available patients and corresponding features (ct/lymphnodes/azygos/brachiocephalicveins/esophagus/subclaviancarotidarteries)
for d in dataset["test"]:
print(d)
```
A detailed interactive demo on how to load and work with the data can be seen on CoLab. Click the CoLab badge <a href="https://colab.research.google.com/gist/andreped/274bf953771059fd9537877404369bed/lynos-load-dataset-example.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> to see the notebook or alternatively click [here](https://github.com/raidionics/LyNoS/blob/main/notebooks/lynos-load-dataset-example.ipynb) to see it on GitHub.
### [Dataset structure](https://github.com/raidionics/LyNoS#dataset-structure)
```
└── LyNoS.zip
├── stations_sto.csv
└── LyNoS/
├── Pat1/
│ ├── pat1_data.nii.gz
│ ├── pat1_labels_Azygos.nii.gz
│ ├── pat1_labels_Esophagus.nii.gz
│ ├── pat1_labels_LymphNodes.nii.gz
│ └── pat1_labels_SubCarArt.nii.gz
├── [...]
└── Pat15/
├── pat15_data.nii.gz
├── pat15_labels_Azygos.nii.gz
├── pat15_labels_Esophagus.nii.gz
├── pat15_labels_LymphNodes.nii.gz
└── pat15_labels_SubCarArt.nii.gz
```
### [NIH Dataset Completion](https://github.com/raidionics/LyNoS#nih-dataset-completion)
A larger dataset of 90 patients featuring enlarged lymph nodes has also been released by the National Institutes of Health and is available for download on the official [web-page](https://wiki.cancerimagingarchive.net/pages/viewpage.action?pageId=19726546).
As a supplement to this dataset, lymph node segmentation masks have been refined for all patients and stations have been manually assigned to each, available [here](https://drive.google.com/uc?id=1iVCnZc1GHwtx9scyAXdANqz2HdQArTHn).
## [Demo](https://github.com/raidionics/LyNoS#demo) <a target="_blank" href="https://huggingface.co/spaces/andreped/LyNoS"><img src="https://img.shields.io/badge/🤗%20Hugging%20Face-Spaces-yellow.svg"></a>
To access the live demo, click on the `Hugging Face` badge above. Below is a snapshot of the current state of the demo app.
<img width="1400" alt="Screenshot 2023-11-09 at 20 53 29" src="https://github.com/raidionics/LyNoS/assets/29090665/ce661da0-d172-4481-b9b5-8b3e29a9fc1f">
## [Development](https://github.com/raidionics/LyNoS#development)
### [Docker](https://github.com/raidionics/LyNoS#docker)
Alternatively, you can deploy the software locally. Note that this is only relevant for development purposes. Simply dockerize the app and run it:
```
docker build -t lynos .
docker run -it -p 7860:7860 lynos
```
Then open `http://127.0.0.1:7860` in your favourite internet browser to view the demo.
### [Python](https://github.com/raidionics/LyNoS#python)
It is also possible to run the app locally without Docker. Just set up a virtual environment and run the app.
Note that the current working directory would need to be adjusted based on where `LyNoS` is located on disk.
```
git clone https://github.com/raidionics/LyNoS.git
cd LyNoS/
virtualenv -p python3 venv --clear
source venv/bin/activate
pip install -r ./demo/requirements.txt
python demo/app.py --cwd ./
```
## [Citation](https://github.com/raidionics/LyNoS#citation)
If you found the dataset and/or web application relevant in your research, please cite the following reference:
```
@article{bouget2021mediastinal,
author = {David Bouget and André Pedersen and Johanna Vanel and Haakon O. Leira and Thomas Langø},
title = {Mediastinal lymph nodes segmentation using 3D convolutional neural network ensembles and anatomical priors guiding},
journal = {Computer Methods in Biomechanics and Biomedical Engineering: Imaging \& Visualization},
volume = {0},
number = {0},
pages = {1-15},
year = {2022},
publisher = {Taylor & Francis},
doi = {10.1080/21681163.2022.2043778},
URL = {https://doi.org/10.1080/21681163.2022.2043778},
eprint = {https://doi.org/10.1080/21681163.2022.2043778}
}
```
## [License](https://github.com/raidionics/LyNoS#license)
The code in this repository is released under [MIT license](https://github.com/raidionics/LyNoS/blob/main/LICENSE). | andreped/LyNoS | [
"task_categories:image-segmentation",
"size_categories:1B<n<10B",
"language:en",
"license:mit",
"medical",
"region:us"
]
| 2023-11-09T19:35:31+00:00 | {"language": ["en"], "license": "mit", "size_categories": ["1B<n<10B"], "task_categories": ["image-segmentation"], "pretty_name": "AeroPath", "tags": ["medical"]} | 2023-11-15T13:11:19+00:00 | []
| [
"en"
]
| TAGS
#task_categories-image-segmentation #size_categories-1B<n<10B #language-English #license-mit #medical #region-us
|
<div align="center">
<h1 align="center"> LyNoS </h1>
<h3 align="center">A multilabel lymph node segmentation dataset from contrast CT</h3>
LyNoS was developed by SINTEF Medical Image Analysis to accelerate medical AI research.
</div>
## Brief intro
This repository contains the LyNoS dataset described in "_Mediastinal lymph nodes segmentation using 3D convolutional neural network ensembles and anatomical priors guiding_".
The dataset has now also been uploaded to Zenodo and the Hugging Face Hub enabling users to more easily access the data through Python API.
We have also developed a web demo to enable others to easily test the pretrained model presented in the paper. The application was developed using Gradio for the frontend and the segmentation is performed using the Raidionics backend.
## Dataset <a href="URL target="_parent"><img src="URL alt="Open In Colab"/></a>
### Accessing dataset
The dataset contains 15 CTs with corresponding lymph nodes, azygos, esophagus, and subclavian carotid arteries. The folder structure is described below.
The easiest way to access the data is through Python with Hugging Face's datasets package:
A detailed interactive demo on how to load and work with the data can be seen on CoLab. Click the CoLab badge <a href="URL target="_parent"><img src="URL alt="Open In Colab"/></a> to see the notebook or alternatively click here to see it on GitHub.
### Dataset structure
### NIH Dataset Completion
A larger dataset made of 90 patients featuring enlarged lymph nodes has also been made available by the National Institutes of Health, and is available for download on the official web-page.
As a supplement to this dataset, lymph nodes segmentation masks have been refined for all patients and stations have been manually assigned to each, available here.
## Demo <a target="_blank" href="URL src="URL
To access the live demo, click on the 'Hugging Face' badge above. Below is a snapshot of the current state of the demo app.
<img width="1400" alt="Screenshot 2023-11-09 at 20 53 29" src="URL
## Development
### Docker
Alternatively, you can deploy the software locally. Note that this is only relevant for development purposes. Simply dockerize the app and run it:
Then open 'http://127.0.0.1:7860' in your favourite internet browser to view the demo.
### Python
It is also possible to run the app locally without Docker. Just setup a virtual environment and run the app.
Note that the current working directory would need to be adjusted based on where 'LyNoS' is located on disk.
## Citation
If you found the dataset and/or web application relevant in your research, please cite the following reference:
## License
The code in this repository is released under MIT license. | [
"## Brief intro\n\nThis repository contains the LyNoS dataset described in \"_Mediastinal lymph nodes segmentation using 3D convolutional neural network ensembles and anatomical priors guiding_\".\nThe dataset has now also been uploaded to Zenodo and the Hugging Face Hub enabling users to more easily access the data through Python API.\n\nWe have also developed a web demo to enable others to easily test the pretrained model presented in the paper. The application was developed using Gradio for the frontend and the segmentation is performed using the Raidionics backend.",
"## Dataset <a href=\"URL target=\"_parent\"><img src=\"URL alt=\"Open In Colab\"/></a>",
"### Accessing dataset\n\nThe dataset contains 15 CTs with corresponding lymph nodes, azygos, esophagus, and subclavian carotid arteries. The folder structure is described below.\n\nThe easiest way to access the data is through Python with Hugging Face's datasets package:\n\n\nA detailed interactive demo on how to load and work with the data can be seen on CoLab. Click the CoLab badge <a href=\"URL target=\"_parent\"><img src=\"URL alt=\"Open In Colab\"/></a> to see the notebook or alternatively click here to see it on GitHub.",
"### Dataset structure",
"### NIH Dataset Completion\nA larger dataset made of 90 patients featuring enlarged lymph nodes has also been made available by the National Institutes of Health, and is available for download on the official web-page.\nAs a supplement to this dataset, lymph nodes segmentation masks have been refined for all patients and stations have been manually assigned to each, available here.",
"## Demo <a target=\"_blank\" href=\"URL src=\"URL\n\nTo access the live demo, click on the 'Hugging Face' badge above. Below is a snapshot of the current state of the demo app.\n\n<img width=\"1400\" alt=\"Screenshot 2023-11-09 at 20 53 29\" src=\"URL",
"## Development",
"### Docker\n\nAlternatively, you can deploy the software locally. Note that this is only relevant for development purposes. Simply dockerize the app and run it:\n\n\n\nThen open 'http://127.0.0.1:7860' in your favourite internet browser to view the demo.",
"### Python\n\nIt is also possible to run the app locally without Docker. Just setup a virtual environment and run the app.\nNote that the current working directory would need to be adjusted based on where 'LyNoS' is located on disk.",
"## Citation\n\nIf you found the dataset and/or web application relevant in your research, please cite the following reference:",
"## License\n\nThe code in this repository is released under MIT license."
]
| [
"TAGS\n#task_categories-image-segmentation #size_categories-1B<n<10B #language-English #license-mit #medical #region-us \n",
"## Brief intro\n\nThis repository contains the LyNoS dataset described in \"_Mediastinal lymph nodes segmentation using 3D convolutional neural network ensembles and anatomical priors guiding_\".\nThe dataset has now also been uploaded to Zenodo and the Hugging Face Hub enabling users to more easily access the data through Python API.\n\nWe have also developed a web demo to enable others to easily test the pretrained model presented in the paper. The application was developed using Gradio for the frontend and the segmentation is performed using the Raidionics backend.",
"## Dataset <a href=\"URL target=\"_parent\"><img src=\"URL alt=\"Open In Colab\"/></a>",
"### Accessing dataset\n\nThe dataset contains 15 CTs with corresponding lymph nodes, azygos, esophagus, and subclavian carotid arteries. The folder structure is described below.\n\nThe easiest way to access the data is through Python with Hugging Face's datasets package:\n\n\nA detailed interactive demo on how to load and work with the data can be seen on CoLab. Click the CoLab badge <a href=\"URL target=\"_parent\"><img src=\"URL alt=\"Open In Colab\"/></a> to see the notebook or alternatively click here to see it on GitHub.",
"### Dataset structure",
"### NIH Dataset Completion\nA larger dataset made of 90 patients featuring enlarged lymph nodes has also been made available by the National Institutes of Health, and is available for download on the official web-page.\nAs a supplement to this dataset, lymph nodes segmentation masks have been refined for all patients and stations have been manually assigned to each, available here.",
"## Demo <a target=\"_blank\" href=\"URL src=\"URL\n\nTo access the live demo, click on the 'Hugging Face' badge above. Below is a snapshot of the current state of the demo app.\n\n<img width=\"1400\" alt=\"Screenshot 2023-11-09 at 20 53 29\" src=\"URL",
"## Development",
"### Docker\n\nAlternatively, you can deploy the software locally. Note that this is only relevant for development purposes. Simply dockerize the app and run it:\n\n\n\nThen open 'http://127.0.0.1:7860' in your favourite internet browser to view the demo.",
"### Python\n\nIt is also possible to run the app locally without Docker. Just setup a virtual environment and run the app.\nNote that the current working directory would need to be adjusted based on where 'LyNoS' is located on disk.",
"## Citation\n\nIf you found the dataset and/or web application relevant in your research, please cite the following reference:",
"## License\n\nThe code in this repository is released under MIT license."
]
| [
42,
131,
32,
140,
5,
87,
76,
2,
62,
53,
25,
15
]
| [
"passage: TAGS\n#task_categories-image-segmentation #size_categories-1B<n<10B #language-English #license-mit #medical #region-us \n## Brief intro\n\nThis repository contains the LyNoS dataset described in \"_Mediastinal lymph nodes segmentation using 3D convolutional neural network ensembles and anatomical priors guiding_\".\nThe dataset has now also been uploaded to Zenodo and the Hugging Face Hub enabling users to more easily access the data through Python API.\n\nWe have also developed a web demo to enable others to easily test the pretrained model presented in the paper. The application was developed using Gradio for the frontend and the segmentation is performed using the Raidionics backend.## Dataset <a href=\"URL target=\"_parent\"><img src=\"URL alt=\"Open In Colab\"/></a>### Accessing dataset\n\nThe dataset contains 15 CTs with corresponding lymph nodes, azygos, esophagus, and subclavian carotid arteries. The folder structure is described below.\n\nThe easiest way to access the data is through Python with Hugging Face's datasets package:\n\n\nA detailed interactive demo on how to load and work with the data can be seen on CoLab. Click the CoLab badge <a href=\"URL target=\"_parent\"><img src=\"URL alt=\"Open In Colab\"/></a> to see the notebook or alternatively click here to see it on GitHub.### Dataset structure### NIH Dataset Completion\nA larger dataset made of 90 patients featuring enlarged lymph nodes has also been made available by the National Institutes of Health, and is available for download on the official web-page.\nAs a supplement to this dataset, lymph nodes segmentation masks have been refined for all patients and stations have been manually assigned to each, available here."
]
|
dfa6ce0f26d4fe14ef1162007f824e0a4468e180 | # Dataset Card for "bw_spec_cls_80_38"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | arieg/bw_spec_cls_80_38 | [
"region:us"
]
| 2023-11-09T19:43:17+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "86720", "1": "86721", "2": "86724", "3": "86725", "4": "86730", "5": "86761", "6": "86762", "7": "86763", "8": "86793", "9": "86795", "10": "86799", "11": "87068", "12": "87070", "13": "87096", "14": "87097", "15": "87098", "16": "87099", "17": "87100", "18": "87101", "19": "87102", "20": "87103", "21": "87104", "22": "87105", "23": "87106", "24": "87107", "25": "87108", "26": "87121", "27": "87151", "28": "87152", "29": "87153", "30": "87154", "31": "87155", "32": "87157", "33": "87158", "34": "87159", "35": "87160", "36": "87161", "37": "87185", "38": "87186", "39": "87187", "40": "87188", "41": "87189", "42": "87190", "43": "87191", "44": "87192", "45": "87193", "46": "87194", "47": "87237", "48": "87322", "49": "87323", "50": "87324", "51": "87325", "52": "87361", "53": "87362", "54": "87363", "55": "87377", "56": "87430", "57": "87431", "58": "87639", "59": "87641", "60": "87642", "61": "87643", "62": "87644", "63": "87645", "64": "87967", "65": "87968", "66": "87971", "67": "88428", "68": "88429", "69": "88486", "70": "88846", "71": "88848", "72": "88854", "73": "88856", "74": "88858", "75": "88860", "76": "88861", "77": "88863", "78": "88864", "79": "88867"}}}}], "splits": [{"name": "train", "num_bytes": 87451164.8, "num_examples": 1600}, {"name": "test", "num_bytes": 21839251.0, "num_examples": 400}], "download_size": 109270034, "dataset_size": 109290415.8}} | 2023-11-09T19:43:37+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "bw_spec_cls_80_38"
More Information needed | [
"# Dataset Card for \"bw_spec_cls_80_38\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"bw_spec_cls_80_38\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"bw_spec_cls_80_38\"\n\nMore Information needed"
]
|
d105ab96c88c13f43dbd9f020958b3297b5b685d | # Dataset Card for "repe_emotions_function"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | justinphan3110/repe_emotions_function | [
"region:us"
]
| 2023-11-09T19:43:43+00:00 | {"dataset_info": {"features": [{"name": "sentence", "sequence": "string"}, {"name": "label", "sequence": "bool"}], "splits": [{"name": "happiness", "num_bytes": 97021, "num_examples": 582}, {"name": "sadness", "num_bytes": 97108, "num_examples": 582}, {"name": "anger", "num_bytes": 96249, "num_examples": 582}, {"name": "fear", "num_bytes": 96764, "num_examples": 582}, {"name": "disgust", "num_bytes": 97960, "num_examples": 582}, {"name": "surprise", "num_bytes": 98722, "num_examples": 582}], "download_size": 100495, "dataset_size": 583824}} | 2023-11-09T19:43:49+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "repe_emotions_function"
More Information needed | [
"# Dataset Card for \"repe_emotions_function\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"repe_emotions_function\"\n\nMore Information needed"
]
| [
6,
17
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"repe_emotions_function\"\n\nMore Information needed"
]
|
6c0b4fb24394f595917ae42cb006009941a80201 | # Dataset Card for "foamy"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | vincentracine/foamy | [
"region:us"
]
| 2023-11-09T19:59:24+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "info", "struct": [{"name": "description", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "version", "dtype": "string"}, {"name": "year", "dtype": "int64"}, {"name": "contributor", "dtype": "string"}, {"name": "date_created", "dtype": "string"}]}, {"name": "licenses", "list": [{"name": "url", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "name", "dtype": "string"}]}, {"name": "images", "list": [{"name": "license", "dtype": "int64"}, {"name": "file_name", "dtype": "string"}, {"name": "coco_url", "dtype": "string"}, {"name": "height", "dtype": "int64"}, {"name": "width", "dtype": "int64"}, {"name": "date_captured", "dtype": "string"}, {"name": "flickr_url", "dtype": "string"}, {"name": "darwin_url", "dtype": "string"}, {"name": "darwin_workview_url", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "tag_ids", "sequence": "null"}]}, {"name": "annotations", "list": [{"name": "id", "dtype": "int64"}, {"name": "image_id", "dtype": "int64"}, {"name": "category_id", "dtype": "int64"}, {"name": "segmentation", "sequence": {"sequence": "float64"}}, {"name": "area", "dtype": "float64"}, {"name": "bbox", "sequence": "float64"}, {"name": "iscrowd", "dtype": "int64"}]}, {"name": "categories", "list": [{"name": "id", "dtype": "int64"}, {"name": "name", "dtype": "string"}, {"name": "supercategory", "dtype": "string"}]}, {"name": "tag_categories", "sequence": "null"}], "splits": [{"name": "train", "num_bytes": 2721, "num_examples": 1}, {"name": "validation", "num_bytes": 2721, "num_examples": 1}, {"name": "test", "num_bytes": 2721, "num_examples": 1}], "download_size": 61983, "dataset_size": 8163}} | 2023-11-09T20:06:57+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "foamy"
More Information needed | [
"# Dataset Card for \"foamy\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"foamy\"\n\nMore Information needed"
]
| [
6,
12
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"foamy\"\n\nMore Information needed"
]
|
533a68f2589bdd1860f3d177bdfe137e1b3424b8 | # Dataset Card for "codegen"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | pkboom/codegen | [
"region:us"
]
| 2023-11-09T20:00:34+00:00 | {"dataset_info": {"features": [{"name": "repo_id", "dtype": "string"}, {"name": "file_path", "dtype": "string"}, {"name": "content", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1077609, "num_examples": 272}], "download_size": 370145, "dataset_size": 1077609}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-09T20:12:42+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "codegen"
More Information needed | [
"# Dataset Card for \"codegen\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"codegen\"\n\nMore Information needed"
]
| [
6,
12
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"codegen\"\n\nMore Information needed"
]
|
885520f7557d3ca5f81f6529b76a8e46cbac8783 | # Dataset Card for "dataset_2000_decompese_question_1"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | presencesw/dataset_2000_decompese_question_1 | [
"region:us"
]
| 2023-11-09T20:08:21+00:00 | {"dataset_info": {"features": [{"name": "entities", "sequence": "null"}, {"name": "triplets", "list": [{"name": "question", "dtype": "string"}]}, {"name": "answer", "dtype": "string"}, {"name": "complex_question", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 67820, "num_examples": 199}], "download_size": 26545, "dataset_size": 67820}} | 2023-11-09T20:08:22+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "dataset_2000_decompese_question_1"
More Information needed | [
"# Dataset Card for \"dataset_2000_decompese_question_1\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"dataset_2000_decompese_question_1\"\n\nMore Information needed"
]
| [
6,
22
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"dataset_2000_decompese_question_1\"\n\nMore Information needed"
]
|
51b3f6093b4c1b8e11a13887fa7b40b77b7fcd80 | # Dataset Card for "bw_spec_cls_80_39"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | arieg/bw_spec_cls_80_39 | [
"region:us"
]
| 2023-11-09T20:08:49+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "88868", "1": "88869", "2": "88870", "3": "88871", "4": "88872", "5": "88873", "6": "88874", "7": "88875", "8": "88876", "9": "88877", "10": "88878", "11": "88879", "12": "88892", "13": "88899", "14": "88900", "15": "88959", "16": "88960", "17": "89212", "18": "89350", "19": "89376", "20": "89441", "21": "89447", "22": "89456", "23": "89473", "24": "89474", "25": "89482", "26": "89484", "27": "89485", "28": "89486", "29": "89639", "30": "89704", "31": "89814", "32": "89815", "33": "89816", "34": "89817", "35": "89841", "36": "89843", "37": "89846", "38": "89847", "39": "89848", "40": "89857", "41": "89859", "42": "89860", "43": "89991", "44": "89992", "45": "90027", "46": "90074", "47": "90278", "48": "90570", "49": "90579", "50": "90582", "51": "90583", "52": "90587", "53": "90589", "54": "90590", "55": "90591", "56": "90592", "57": "90625", "58": "90639", "59": "90652", "60": "90695", "61": "90804", "62": "90824", "63": "90826", "64": "90828", "65": "90982", "66": "90987", "67": "90993", "68": "91081", "69": "91082", "70": "91083", "71": "91084", "72": "91085", "73": "91086", "74": "91087", "75": "91088", "76": "91089", "77": "91092", "78": "91093", "79": "91098"}}}}], "splits": [{"name": "train", "num_bytes": 87209993.6, "num_examples": 1600}, {"name": "test", "num_bytes": 21622551.0, "num_examples": 400}], "download_size": 108195961, "dataset_size": 108832544.6}} | 2023-11-09T20:09:06+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "bw_spec_cls_80_39"
More Information needed | [
"# Dataset Card for \"bw_spec_cls_80_39\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"bw_spec_cls_80_39\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"bw_spec_cls_80_39\"\n\nMore Information needed"
]
|
d7eaf5c9096d9cb86f7441ef85a4abaeb5990f4a | # Dataset Card for "xlsum_data-xlsum_temario_results"
rouge= {'rouge1': 0.29061682940043887, 'rouge2': 0.10841830904619996, 'rougeL': 0.20082902646081413, 'rougeLsum': 0.20082902646081413}
bert= {'precision': 0.7047167878616147, 'recall': 0.7486215781667092, 'f1': 0.7253076068366446}
mover = 0.59869974702815 | arthurmluz/xlsum_data-xlsum_temario_results | [
"region:us"
]
| 2023-11-09T20:14:25+00:00 | {"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 28155830, "num_examples": 7175}], "download_size": 17248185, "dataset_size": 28155830}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]} | 2023-11-13T20:58:31+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "xlsum_data-xlsum_temario_results"
rouge= {'rouge1': 0.29061682940043887, 'rouge2': 0.10841830904619996, 'rougeL': 0.20082902646081413, 'rougeLsum': 0.20082902646081413}
bert= {'precision': 0.7047167878616147, 'recall': 0.7486215781667092, 'f1': 0.7253076068366446}
mover = 0.59869974702815 | [
"# Dataset Card for \"xlsum_data-xlsum_temario_results\"\n\nrouge= {'rouge1': 0.29061682940043887, 'rouge2': 0.10841830904619996, 'rougeL': 0.20082902646081413, 'rougeLsum': 0.20082902646081413}\n\nbert= {'precision': 0.7047167878616147, 'recall': 0.7486215781667092, 'f1': 0.7253076068366446}\n\nmover = 0.59869974702815"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"xlsum_data-xlsum_temario_results\"\n\nrouge= {'rouge1': 0.29061682940043887, 'rouge2': 0.10841830904619996, 'rougeL': 0.20082902646081413, 'rougeLsum': 0.20082902646081413}\n\nbert= {'precision': 0.7047167878616147, 'recall': 0.7486215781667092, 'f1': 0.7253076068366446}\n\nmover = 0.59869974702815"
]
| [
6,
140
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"xlsum_data-xlsum_temario_results\"\n\nrouge= {'rouge1': 0.29061682940043887, 'rouge2': 0.10841830904619996, 'rougeL': 0.20082902646081413, 'rougeLsum': 0.20082902646081413}\n\nbert= {'precision': 0.7047167878616147, 'recall': 0.7486215781667092, 'f1': 0.7253076068366446}\n\nmover = 0.59869974702815"
]
|
6a815696bd66c90ea12c5ca00ab2a79116a3d37e | # Dataset Card for "bw_spec_cls_80_40"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | arieg/bw_spec_cls_80_40 | [
"region:us"
]
| 2023-11-09T20:34:54+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "91102", "1": "91130", "2": "91157", "3": "91158", "4": "91159", "5": "91160", "6": "91161", "7": "91162", "8": "91163", "9": "91164", "10": "91177", "11": "91178", "12": "91179", "13": "91181", "14": "91182", "15": "91183", "16": "91184", "17": "91185", "18": "91186", "19": "91187", "20": "91205", "21": "91228", "22": "91238", "23": "91306", "24": "91309", "25": "91312", "26": "91315", "27": "91317", "28": "91318", "29": "91319", "30": "91329", "31": "91349", "32": "91443", "33": "91455", "34": "91458", "35": "91459", "36": "91619", "37": "91620", "38": "91621", "39": "91622", "40": "91623", "41": "91624", "42": "91625", "43": "91755", "44": "91788", "45": "91790", "46": "91791", "47": "91793", "48": "91796", "49": "91797", "50": "91851", "51": "91868", "52": "91869", "53": "91894", "54": "91897", "55": "91899", "56": "91900", "57": "91933", "58": "91934", "59": "91936", "60": "91937", "61": "91938", "62": "91958", "63": "91960", "64": "92124", "65": "92125", "66": "92129", "67": "92130", "68": "92131", "69": "92206", "70": "92275", "71": "92282", "72": "92283", "73": "92284", "74": "92292", "75": "92466", "76": "92508", "77": "92535", "78": "92536", "79": "92538"}}}}], "splits": [{"name": "train", "num_bytes": 91402580.8, "num_examples": 1600}, {"name": "test", "num_bytes": 22793187.0, "num_examples": 400}], "download_size": 114027453, "dataset_size": 114195767.8}} | 2023-11-09T20:35:12+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "bw_spec_cls_80_40"
More Information needed | [
"# Dataset Card for \"bw_spec_cls_80_40\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"bw_spec_cls_80_40\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"bw_spec_cls_80_40\"\n\nMore Information needed"
]
|
796edc7e5a416fcd2c87d7ac2c6c8aced2595597 | # Dataset Card for "font-examples"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | gaborcselle/font-examples | [
"region:us"
]
| 2023-11-09T20:43:25+00:00 | {"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "AlfaSlabOne-Regular", "1": "ArchitectsDaughter-Regular", "2": "Arial", "3": "Arial Black", "4": "Arial Bold", "5": "Arial Bold Italic", "6": "Avenir", "7": "Bangers-Regular", "8": "BlackOpsOne-Regular", "9": "Courier", "10": "Georgia", "11": "Helvetica", "12": "IBMPlexSans-Regular", "13": "Inter-Regular", "14": "KaushanScript-Regular", "15": "Lato-Regular", "16": "Lobster-Regular", "17": "Lora-Regular", "18": "Merriweather-Regular", "19": "Niconne-Regular", "20": "OpenSans-Bold", "21": "OpenSans-Italic", "22": "OpenSans-Light", "23": "Pacifico-Regular", "24": "PixelifySans-Regular", "25": "PlayfairDisplay-Regular", "26": "Poppins-Regular", "27": "Rakkas-Regular", "28": "Roboto-Regular", "29": "RobotoMono-Regular", "30": "RobotoSlab-Regular", "31": "Rubik-Regular", "32": "SpaceMono-Regular", "33": "Tahoma", "34": "Tahoma Bold", "35": "Times New Roman", "36": "Times New Roman Bold", "37": "Times New Roman Bold Italic", "38": "Times New Roman Italic", "39": "TitilliumWeb-Regular", "40": "Trebuchet MS", "41": "Trebuchet MS Bold", "42": "Trebuchet MS Bold Italic", "43": "Trebuchet MS Italic", "44": "Verdana", "45": "Verdana Bold", "46": "Verdana Bold Italic", "47": "Verdana Italic"}}}}], "splits": [{"name": "train", "num_bytes": 108384385.6, "num_examples": 2400}], "download_size": 104995129, "dataset_size": 108384385.6}} | 2023-11-09T20:43:44+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "font-examples"
More Information needed | [
"# Dataset Card for \"font-examples\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"font-examples\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"font-examples\"\n\nMore Information needed"
]
|
6ac8bd53d5a436204307ab7a0a2dcb9ad80850c0 | # Dataset Card for "bw_spec_cls_80_41"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | arieg/bw_spec_cls_80_41 | [
"region:us"
]
| 2023-11-09T21:01:18+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "92539", "1": "92540", "2": "92546", "3": "92548", "4": "92551", "5": "92554", "6": "92556", "7": "92561", "8": "92562", "9": "92564", "10": "92565", "11": "92573", "12": "92574", "13": "92868", "14": "92872", "15": "92873", "16": "92874", "17": "92878", "18": "92881", "19": "92885", "20": "92886", "21": "92887", "22": "92888", "23": "92889", "24": "92947", "25": "92948", "26": "92949", "27": "92950", "28": "92951", "29": "92952", "30": "92953", "31": "92954", "32": "92955", "33": "93363", "34": "93364", "35": "93704", "36": "93710", "37": "93712", "38": "93716", "39": "93727", "40": "93867", "41": "93868", "42": "93915", "43": "93917", "44": "93918", "45": "93919", "46": "93920", "47": "93921", "48": "93940", "49": "93941", "50": "93942", "51": "93943", "52": "93944", "53": "93981", "54": "94026", "55": "94033", "56": "94034", "57": "94035", "58": "94036", "59": "94037", "60": "94038", "61": "94039", "62": "94263", "63": "94348", "64": "94411", "65": "94414", "66": "94415", "67": "94419", "68": "94422", "69": "94423", "70": "94426", "71": "94449", "72": "94465", "73": "94467", "74": "94468", "75": "94628", "76": "94630", "77": "94631", "78": "94632", "79": "94634"}}}}], "splits": [{"name": "train", "num_bytes": 84907918.4, "num_examples": 1600}, {"name": "test", "num_bytes": 21650301.0, "num_examples": 400}], "download_size": 108113820, "dataset_size": 106558219.4}} | 2023-11-09T21:01:34+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "bw_spec_cls_80_41"
More Information needed | [
"# Dataset Card for \"bw_spec_cls_80_41\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"bw_spec_cls_80_41\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"bw_spec_cls_80_41\"\n\nMore Information needed"
]
|
98fe36bc853b46aac9678606f67516b2313d1b11 | # Glue-STSB with Contrastive Axes
Dataset format:
A pair of sentences, and a prompt along which the sentences are similar or different.
Includes embeddings generated by `sentence-transformers`.
`text_a` and `text_b` are from the Glue-STSB dataset, `prompt` and `label` are machine generated.
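A minimal way to look at a few pairs is sketched below; it assumes the `train` split and the column names listed in this card, so adjust if the schema changes:
```python
# Sketch: inspect one contrastive pair (column names as listed in this card)
from datasets import load_dataset

ds = load_dataset("iamroot/stsb-contrastive-axes", split="train")
example = ds[0]
print(example["text_a"])
print(example["text_b"])
print(example["prompt"], example["label"])   # axis of comparison and machine-generated label
print(len(example["text_a_embedding"]))      # pre-computed sentence-transformers embedding
```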
| iamroot/stsb-contrastive-axes | [
"region:us"
]
| 2023-11-09T21:02:57+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "text_a_embedding", "sequence": "float32"}, {"name": "text_b_embedding", "sequence": "float32"}, {"name": "prompt_embedding", "sequence": "float32"}, {"name": "text_a", "dtype": "string"}, {"name": "text_b", "dtype": "string"}, {"name": "prompt", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 219575612.0, "num_examples": 23388}, {"name": "test", "num_bytes": 54893903.0, "num_examples": 5847}], "download_size": 311913820, "dataset_size": 274469515.0}} | 2023-11-15T19:26:01+00:00 | []
| []
| TAGS
#region-us
| # Glue-STSB with Contrastive Axes
Dataset format:
A pair of sentences, and a prompt along which the sentences are similar or different.
Includes embeddings generated by 'sentence-transformers'.
'text_a' and 'text_b' are from the Glue-STSB dataset, 'prompt' and 'label' are machine generated.
| [
"# Glue-STSB with Contrastive Axes\n\nDataset format:\n\nA pair of sentences, and a prompt along which the sentences are similar or different.\n\nIncludes embeddings generated by 'sentence-transformers'.\n\n'text_a' and 'text_b' are from the Glue-STSB dataset, 'prompt' and 'label' are machine generated."
]
| [
"TAGS\n#region-us \n",
"# Glue-STSB with Contrastive Axes\n\nDataset format:\n\nA pair of sentences, and a prompt along which the sentences are similar or different.\n\nIncludes embeddings generated by 'sentence-transformers'.\n\n'text_a' and 'text_b' are from the Glue-STSB dataset, 'prompt' and 'label' are machine generated."
]
| [
6,
89
]
| [
"passage: TAGS\n#region-us \n# Glue-STSB with Contrastive Axes\n\nDataset format:\n\nA pair of sentences, and a prompt along which the sentences are similar or different.\n\nIncludes embeddings generated by 'sentence-transformers'.\n\n'text_a' and 'text_b' are from the Glue-STSB dataset, 'prompt' and 'label' are machine generated."
]
|
1840c27fe18e0e9ac589e85ac82c055923d8e7c8 | # Dataset Card for "Israel-palestine-war"
This Demo dataset is related to the research paper entitled "Online News Channel Streaming: A Comprehensive Analysis of Channel and User Engagement during the Israel-Palestine Conflict".
PREPRINT (Version 1) available at Research Square [https://www.researchsquare.com/article/rs-3927576/latest].

User Comments on News YouTube channels during the current war of Palestine & Israel, Oct-2023.
Demo dataset size: {'NBCNews': 188490, 'aljazeeraenglish': 100164, 'CNN': 182272, 'Piers Morgan Uncensored': 128093, 'BBCNews': 267834, 'FoxNews': 161493}
Total number of videos: 310; total number of comments: 1,028,346.
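A per-channel count like the one above can be reproduced directly from the records; the sketch below assumes the `train` split and the `publish_channel`/`comments` columns described in this card's metadata:
```python
# Sketch: recompute comments per channel from the demo split
from collections import Counter
from datasets import load_dataset

ds = load_dataset("alsubari/Israel-palestine-war", split="train")

per_channel = Counter()
for video in ds:
    per_channel[video["publish_channel"]] += len(video["comments"])

print(dict(per_channel))           # per-channel comment counts
print(sum(per_channel.values()))   # total number of comments
```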
## Journal Publication Status:
Submitted
# Citation:-
Akram Alsubari, M. Alhiba, Ahlam Enan, Najran. N. H. Aldawla and M. Alsurori, "Online News Channel Streaming: A Comprehensive Analysis of Channel and User Engagement during the Israel-Palestine Conflict",PREPRINT,2024, DOI:[https://doi.org/10.21203/rs.3.rs-3927576/v1]
Or
Akram Alsubari, M. Alhiba, Ahlam Enan et al. Online News Channel Streaming: A Comprehensive Analysis of Channel and User Engagement during the Israel-Palestine Conflict, 06 February 2024, PREPRINT (Version 1) available at Research Square [https://doi.org/10.21203/rs.3.rs-3927576/v1]
| alsubari/Israel-palestine-war | [
"task_categories:text-classification",
"task_categories:text-generation",
"task_categories:conversational",
"task_categories:summarization",
"task_categories:text2text-generation",
"task_categories:sentence-similarity",
"task_categories:zero-shot-classification",
"task_categories:fill-mask",
"language:en",
"license:afl-3.0",
"not-for-all-audiences",
"doi:10.57967/hf/1747",
"region:us"
]
| 2023-11-09T21:10:23+00:00 | {"language": ["en"], "license": "afl-3.0", "task_categories": ["text-classification", "text-generation", "conversational", "summarization", "text2text-generation", "sentence-similarity", "zero-shot-classification", "fill-mask"], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "publish_channel", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "start_duration", "dtype": "string"}, {"name": "end_duration", "dtype": "string"}, {"name": "lengthSeconds", "dtype": "int64"}, {"name": "total_comments", "dtype": "int64"}, {"name": "total_unique_users", "dtype": "int64"}, {"name": "comments", "list": [{"name": "author", "dtype": "string"}, {"name": "channel", "dtype": "string"}, {"name": "cid", "dtype": "string"}, {"name": "heart", "dtype": "bool"}, {"name": "reply", "dtype": "bool"}, {"name": "text", "dtype": "string"}, {"name": "time", "dtype": "string"}, {"name": "time_parsed", "dtype": "float64"}, {"name": "votes", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 274986362, "num_examples": 310}], "download_size": 152975094, "dataset_size": 274986362}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "tags": ["not-for-all-audiences"]} | 2024-02-06T15:48:50+00:00 | []
| [
"en"
]
| TAGS
#task_categories-text-classification #task_categories-text-generation #task_categories-conversational #task_categories-summarization #task_categories-text2text-generation #task_categories-sentence-similarity #task_categories-zero-shot-classification #task_categories-fill-mask #language-English #license-afl-3.0 #not-for-all-audiences #doi-10.57967/hf/1747 #region-us
| # Dataset Card for "Israel-palestine-war"
This Demo dataset is related to the research paper entitle "Online News Channel Streaming: A Comprehensive Analysis of Channel and User Engagement during the Israel-Palestine Conflict".
PREPRINT (Version 1) available at Research Square [URL
!URL
User Comments on News YouTube channels During Current War of Palstine & Israel Oct-2023.
Demo dataset size: {'NBCNews': 188490, 'aljazeeraenglish': 100164, 'CNN': 182272, 'Piers Morgan Uncensored': 128093, 'BBCNews': 267834, 'FoxNews': 161493}
total number of videos: 310 total number of comments: 1028346
## Journal Publication Statues:
Submitted
:-
Akram Alsubari, M. Alhiba, Ahlam Enan, Najran. N. H. Aldawla and M. Alsurori, "Online News Channel Streaming: A Comprehensive Analysis of Channel and User Engagement during the Israel-Palestine Conflict",PREPRINT,2024, DOI:[URL
Or
Akram Alsubari, M. Alhiba, Ahlam Enan et al. Online News Channel Streaming: A Comprehensive Analysis of Channel and User Engagement during the Israel-Palestine Conflict, 06 February 2024, PREPRINT (Version 1) available at Research Square [URL
| [
"# Dataset Card for \"Israel-palestine-war\"\nThis Demo dataset is related to the research paper entitle \"Online News Channel Streaming: A Comprehensive Analysis of Channel and User Engagement during the Israel-Palestine Conflict\".\nPREPRINT (Version 1) available at Research Square [URL\n\n!URL\n\nUser Comments on News YouTube channels During Current War of Palstine & Israel Oct-2023.\n\nDemo dataset size: {'NBCNews': 188490, 'aljazeeraenglish': 100164, 'CNN': 182272, 'Piers Morgan Uncensored': 128093, 'BBCNews': 267834, 'FoxNews': 161493}\n\ntotal number of videos: 310 total number of comments: 1028346",
"## Journal Publication Statues: \nSubmitted\n\n:- \nAkram Alsubari, M. Alhiba, Ahlam Enan, Najran. N. H. Aldawla and M. Alsurori, \"Online News Channel Streaming: A Comprehensive Analysis of Channel and User Engagement during the Israel-Palestine Conflict\",PREPRINT,2024, DOI:[URL \n\nOr \n\nAkram Alsubari, M. Alhiba, Ahlam Enan et al. Online News Channel Streaming: A Comprehensive Analysis of Channel and User Engagement during the Israel-Palestine Conflict, 06 February 2024, PREPRINT (Version 1) available at Research Square [URL"
]
| [
"TAGS\n#task_categories-text-classification #task_categories-text-generation #task_categories-conversational #task_categories-summarization #task_categories-text2text-generation #task_categories-sentence-similarity #task_categories-zero-shot-classification #task_categories-fill-mask #language-English #license-afl-3.0 #not-for-all-audiences #doi-10.57967/hf/1747 #region-us \n",
"# Dataset Card for \"Israel-palestine-war\"\nThis Demo dataset is related to the research paper entitle \"Online News Channel Streaming: A Comprehensive Analysis of Channel and User Engagement during the Israel-Palestine Conflict\".\nPREPRINT (Version 1) available at Research Square [URL\n\n!URL\n\nUser Comments on News YouTube channels During Current War of Palstine & Israel Oct-2023.\n\nDemo dataset size: {'NBCNews': 188490, 'aljazeeraenglish': 100164, 'CNN': 182272, 'Piers Morgan Uncensored': 128093, 'BBCNews': 267834, 'FoxNews': 161493}\n\ntotal number of videos: 310 total number of comments: 1028346",
"## Journal Publication Statues: \nSubmitted\n\n:- \nAkram Alsubari, M. Alhiba, Ahlam Enan, Najran. N. H. Aldawla and M. Alsurori, \"Online News Channel Streaming: A Comprehensive Analysis of Channel and User Engagement during the Israel-Palestine Conflict\",PREPRINT,2024, DOI:[URL \n\nOr \n\nAkram Alsubari, M. Alhiba, Ahlam Enan et al. Online News Channel Streaming: A Comprehensive Analysis of Channel and User Engagement during the Israel-Palestine Conflict, 06 February 2024, PREPRINT (Version 1) available at Research Square [URL"
]
| [
131,
168,
147
]
| [
"passage: TAGS\n#task_categories-text-classification #task_categories-text-generation #task_categories-conversational #task_categories-summarization #task_categories-text2text-generation #task_categories-sentence-similarity #task_categories-zero-shot-classification #task_categories-fill-mask #language-English #license-afl-3.0 #not-for-all-audiences #doi-10.57967/hf/1747 #region-us \n# Dataset Card for \"Israel-palestine-war\"\nThis Demo dataset is related to the research paper entitle \"Online News Channel Streaming: A Comprehensive Analysis of Channel and User Engagement during the Israel-Palestine Conflict\".\nPREPRINT (Version 1) available at Research Square [URL\n\n!URL\n\nUser Comments on News YouTube channels During Current War of Palstine & Israel Oct-2023.\n\nDemo dataset size: {'NBCNews': 188490, 'aljazeeraenglish': 100164, 'CNN': 182272, 'Piers Morgan Uncensored': 128093, 'BBCNews': 267834, 'FoxNews': 161493}\n\ntotal number of videos: 310 total number of comments: 1028346## Journal Publication Statues: \nSubmitted\n\n:- \nAkram Alsubari, M. Alhiba, Ahlam Enan, Najran. N. H. Aldawla and M. Alsurori, \"Online News Channel Streaming: A Comprehensive Analysis of Channel and User Engagement during the Israel-Palestine Conflict\",PREPRINT,2024, DOI:[URL \n\nOr \n\nAkram Alsubari, M. Alhiba, Ahlam Enan et al. Online News Channel Streaming: A Comprehensive Analysis of Channel and User Engagement during the Israel-Palestine Conflict, 06 February 2024, PREPRINT (Version 1) available at Research Square [URL"
]
|
5a6c29320fde4436fa56899e874e12738c1a6fa3 | # Dataset Card for "codegen-v2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | pkboom/codegen-v2 | [
"region:us"
]
| 2023-11-09T21:12:30+00:00 | {"dataset_info": {"features": [{"name": "index", "dtype": "int64"}, {"name": "repo_id", "dtype": "string"}, {"name": "file_path", "dtype": "string"}, {"name": "content", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1077609, "num_examples": 272}], "download_size": 370057, "dataset_size": 1077609}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-09T21:12:32+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "codegen-v2"
More Information needed | [
"# Dataset Card for \"codegen-v2\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"codegen-v2\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"codegen-v2\"\n\nMore Information needed"
]
|
330e5588896f986a79d638293abb475de27bbf1c | # Dataset Card for "bw_spec_cls_80_42"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | arieg/bw_spec_cls_80_42 | [
"region:us"
]
| 2023-11-09T21:26:14+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "94635", "1": "94638", "2": "95189", "3": "95231", "4": "95248", "5": "95249", "6": "95250", "7": "95251", "8": "95308", "9": "95309", "10": "95310", "11": "95452", "12": "95506", "13": "95564", "14": "95722", "15": "95724", "16": "95725", "17": "95726", "18": "95727", "19": "95908", "20": "95910", "21": "95911", "22": "95912", "23": "95914", "24": "95915", "25": "96166", "26": "96167", "27": "96168", "28": "96169", "29": "96399", "30": "96400", "31": "96401", "32": "96402", "33": "96403", "34": "96408", "35": "96627", "36": "96657", "37": "96675", "38": "96678", "39": "96692", "40": "96693", "41": "96694", "42": "96695", "43": "96696", "44": "96697", "45": "96698", "46": "96699", "47": "96718", "48": "96726", "49": "96728", "50": "96729", "51": "96730", "52": "96731", "53": "96898", "54": "96900", "55": "96901", "56": "96902", "57": "96935", "58": "96936", "59": "96944", "60": "96945", "61": "96946", "62": "97037", "63": "97041", "64": "97043", "65": "97211", "66": "97215", "67": "97216", "68": "97279", "69": "97283", "70": "97285", "71": "97373", "72": "97374", "73": "97393", "74": "97404", "75": "97406", "76": "97407", "77": "97424", "78": "97540", "79": "97542"}}}}], "splits": [{"name": "train", "num_bytes": 88630908.8, "num_examples": 1600}, {"name": "test", "num_bytes": 21994535.0, "num_examples": 400}], "download_size": 110458426, "dataset_size": 110625443.8}} | 2023-11-09T21:26:32+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "bw_spec_cls_80_42"
More Information needed | [
"# Dataset Card for \"bw_spec_cls_80_42\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"bw_spec_cls_80_42\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"bw_spec_cls_80_42\"\n\nMore Information needed"
]
|
8caf709b5acadeca3ca47b90f85eee3c4920dd2d | # Dataset Card for "repe_emotions_function_llama2_chat"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | justinphan3110/repe_emotions_function_llama2_chat | [
"region:us"
]
| 2023-11-09T21:33:49+00:00 | {"dataset_info": {"features": [{"name": "sentence", "sequence": "string"}, {"name": "label", "sequence": "bool"}], "splits": [{"name": "happiness", "num_bytes": 82983, "num_examples": 582}, {"name": "sadness", "num_bytes": 83172, "num_examples": 582}, {"name": "anger", "num_bytes": 82272, "num_examples": 582}, {"name": "fear", "num_bytes": 82870, "num_examples": 582}, {"name": "disgust", "num_bytes": 83999, "num_examples": 582}, {"name": "surprise", "num_bytes": 84882, "num_examples": 582}], "download_size": 96046, "dataset_size": 500178}} | 2023-11-09T21:33:54+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "repe_emotions_function_llama2_chat"
More Information needed | [
"# Dataset Card for \"repe_emotions_function_llama2_chat\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"repe_emotions_function_llama2_chat\"\n\nMore Information needed"
]
| [
6,
23
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"repe_emotions_function_llama2_chat\"\n\nMore Information needed"
]
|
2bcb178d437086b32723eb0cb8859dd31d0c5d8a | # Dataset Card for "dataset_2000_decompese_question_2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | presencesw/dataset_2000_decompese_question_2 | [
"region:us"
]
| 2023-11-09T21:50:13+00:00 | {"dataset_info": {"features": [{"name": "entities", "sequence": "null"}, {"name": "triplets", "list": [{"name": "question", "dtype": "string"}]}, {"name": "answer", "dtype": "string"}, {"name": "complex_question", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 69178, "num_examples": 199}], "download_size": 26387, "dataset_size": 69178}} | 2023-11-09T21:50:14+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "dataset_2000_decompese_question_2"
More Information needed | [
"# Dataset Card for \"dataset_2000_decompese_question_2\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"dataset_2000_decompese_question_2\"\n\nMore Information needed"
]
| [
6,
23
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"dataset_2000_decompese_question_2\"\n\nMore Information needed"
]
|
f284049e3331087db1c5c97d7bcf141d34b62b4e | # Dataset Card for "bw_spec_cls_80_43"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | arieg/bw_spec_cls_80_43 | [
"region:us"
]
| 2023-11-09T21:51:23+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "97544", "1": "97545", "2": "97547", "3": "97548", "4": "97568", "5": "97569", "6": "97570", "7": "97585", "8": "97586", "9": "97587", "10": "97588", "11": "97589", "12": "97590", "13": "97690", "14": "97691", "15": "97692", "16": "97697", "17": "97793", "18": "97794", "19": "97813", "20": "97814", "21": "97841", "22": "97844", "23": "97845", "24": "97846", "25": "97847", "26": "97848", "27": "97886", "28": "97887", "29": "97894", "30": "97940", "31": "97958", "32": "97959", "33": "97960", "34": "97961", "35": "97962", "36": "97980", "37": "97986", "38": "97987", "39": "97988", "40": "97989", "41": "98202", "42": "98203", "43": "98204", "44": "98205", "45": "98206", "46": "98227", "47": "98228", "48": "98235", "49": "98236", "50": "98237", "51": "98238", "52": "98251", "53": "98297", "54": "98339", "55": "98565", "56": "98567", "57": "98569", "58": "98573", "59": "98574", "60": "98575", "61": "98576", "62": "98577", "63": "98578", "64": "98579", "65": "98580", "66": "98581", "67": "98582", "68": "98583", "69": "98584", "70": "98585", "71": "98613", "72": "98617", "73": "98618", "74": "98619", "75": "98620", "76": "98621", "77": "98622", "78": "98623", "79": "98624"}}}}], "splits": [{"name": "train", "num_bytes": 86157560.0, "num_examples": 1600}, {"name": "test", "num_bytes": 21584684.0, "num_examples": 400}], "download_size": 107886434, "dataset_size": 107742244.0}} | 2023-11-09T21:51:42+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "bw_spec_cls_80_43"
More Information needed | [
"# Dataset Card for \"bw_spec_cls_80_43\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"bw_spec_cls_80_43\"\n\nMore Information needed"
]
| [
6,
20
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"bw_spec_cls_80_43\"\n\nMore Information needed"
]
|
a09a60fa01062d49ed626e6b1b0b06f06d394a4e | # Dataset Card for "stack_elixir"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | mvkvc/stack_elixir | [
"region:us"
]
| 2023-11-09T21:56:42+00:00 | {"dataset_info": {"features": [{"name": "hexsha", "dtype": "string"}, {"name": "size", "dtype": "int64"}, {"name": "ext", "dtype": "string"}, {"name": "lang", "dtype": "string"}, {"name": "max_stars_repo_path", "dtype": "string"}, {"name": "max_stars_repo_name", "dtype": "string"}, {"name": "max_stars_repo_head_hexsha", "dtype": "string"}, {"name": "max_stars_repo_licenses", "sequence": "string"}, {"name": "max_stars_count", "dtype": "int64"}, {"name": "max_stars_repo_stars_event_min_datetime", "dtype": "string"}, {"name": "max_stars_repo_stars_event_max_datetime", "dtype": "string"}, {"name": "max_issues_repo_path", "dtype": "string"}, {"name": "max_issues_repo_name", "dtype": "string"}, {"name": "max_issues_repo_head_hexsha", "dtype": "string"}, {"name": "max_issues_repo_licenses", "sequence": "string"}, {"name": "max_issues_count", "dtype": "int64"}, {"name": "max_issues_repo_issues_event_min_datetime", "dtype": "string"}, {"name": "max_issues_repo_issues_event_max_datetime", "dtype": "string"}, {"name": "max_forks_repo_path", "dtype": "string"}, {"name": "max_forks_repo_name", "dtype": "string"}, {"name": "max_forks_repo_head_hexsha", "dtype": "string"}, {"name": "max_forks_repo_licenses", "sequence": "string"}, {"name": "max_forks_count", "dtype": "int64"}, {"name": "max_forks_repo_forks_event_min_datetime", "dtype": "string"}, {"name": "max_forks_repo_forks_event_max_datetime", "dtype": "string"}, {"name": "content", "dtype": "string"}, {"name": "avg_line_length", "dtype": "float64"}, {"name": "max_line_length", "dtype": "int64"}, {"name": "alphanum_fraction", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 2590151574, "num_examples": 594074}], "download_size": 973079076, "dataset_size": 2590151574}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-09T22:19:39+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "stack_elixir"
More Information needed | [
"# Dataset Card for \"stack_elixir\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"stack_elixir\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"stack_elixir\"\n\nMore Information needed"
]
|
494f8d14279c1b091a75ee6262ecb50bd42c5d22 | # Dataset Card for "llama2-llm"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | andrewatef/llama2-llm | [
"region:us"
]
| 2023-11-09T21:59:32+00:00 | {"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 148923611.0, "num_examples": 516177}], "download_size": 81796375, "dataset_size": 148923611.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2024-01-15T18:58:44+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "llama2-llm"
More Information needed | [
"# Dataset Card for \"llama2-llm\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"llama2-llm\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"llama2-llm\"\n\nMore Information needed"
]
|
efd4bec65227f8cb275156b0438031fe0e347430 | # Dataset Card for "topical-chat"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | Fishball02/topical-chat | [
"region:us"
]
| 2023-11-09T22:35:56+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "conversation", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 25238486.960775223, "num_examples": 9705}, {"name": "test", "num_bytes": 2806010.0392247774, "num_examples": 1079}], "download_size": 16100174, "dataset_size": 28044497.0}} | 2023-12-13T21:53:22+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "topical-chat"
More Information needed | [
"# Dataset Card for \"topical-chat\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"topical-chat\"\n\nMore Information needed"
]
| [
6,
14
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"topical-chat\"\n\nMore Information needed"
]
|
01bb90960518a962fbc8d3ce19ed6c92ebdcf85a | # Dataset Card for "dsml_original_loc"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | higgsfield/dsml_original_loc | [
"region:us"
]
| 2023-11-09T22:36:46+00:00 | {"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "completion", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 243684731, "num_examples": 32477}], "download_size": 27760890, "dataset_size": 243684731}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-09T22:36:51+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "dsml_original_loc"
More Information needed | [
"# Dataset Card for \"dsml_original_loc\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"dsml_original_loc\"\n\nMore Information needed"
]
| [
6,
16
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"dsml_original_loc\"\n\nMore Information needed"
]
|
9ca43a18abd0132a8d6ce375feacccc2be5f702d | # Dataset Card for "mscoco_augmented"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | BubbleJoe/mscoco_augmented | [
"region:us"
]
| 2023-11-09T23:17:50+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "restval", "path": "data/restval-*"}]}], "dataset_info": {"features": [{"name": "sentids", "dtype": "int64"}, {"name": "original", "dtype": "string"}, {"name": "role_reversed", "dtype": "string"}, {"name": "relation_reversed", "dtype": "string"}, {"name": "world_knowledge", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 107964962, "num_examples": 414113}, {"name": "test", "num_bytes": 6489292, "num_examples": 25010}, {"name": "validation", "num_bytes": 6517947, "num_examples": 25010}, {"name": "restval", "num_bytes": 39760811, "num_examples": 152634}], "download_size": 20725603, "dataset_size": 160733012}} | 2023-11-14T05:09:32+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "mscoco_augmented"
More Information needed | [
"# Dataset Card for \"mscoco_augmented\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"mscoco_augmented\"\n\nMore Information needed"
]
| [
6,
17
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"mscoco_augmented\"\n\nMore Information needed"
]
|
9577e4c4bf0e0454735b61be3447542a9a9a8ed7 | # Dataset Card for "medquad"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | Tonic/medquad | [
"region:us"
]
| 2023-11-09T23:18:38+00:00 | {"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 7324638, "num_examples": 15549}], "download_size": 2208412, "dataset_size": 7324638}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2024-01-13T08:01:26+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "medquad"
More Information needed | [
"# Dataset Card for \"medquad\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"medquad\"\n\nMore Information needed"
]
| [
6,
12
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"medquad\"\n\nMore Information needed"
]
|
5bc8dd329aba99a9783ffe32164be524d588c1d6 |
Code to test on Colab
!pip install -q transformers[torch] tokenizers datasets evaluate rouge_score sentencepiece huggingface_hub --upgrade
from huggingface_hub import notebook_login
notebook_login()
import nltk
from datasets import load_dataset
import evaluate
import numpy as np
from transformers import T5Tokenizer, DataCollatorForSeq2Seq
from transformers import T5ForConditionalGeneration, Seq2SeqTrainingArguments, Seq2SeqTrainer
# Load and split the dataset
dataset = load_dataset("ajsbsd/presto")
dataset = dataset["train"].train_test_split(test_size=0.2)
#dataset = load_dataset("csv", data_files="./JEOPARDY_CSV.csv")
#dataset = dataset["train"].train_test_split(test_size=0.2)
# Load the tokenizer, model, and data collator
tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-small")
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-small")
data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model)
# We prefix our tasks with "answer the question"
prefix = "answer the question: "
# Define our preprocessing function
def preprocess_function(examples):
"""Add prefix to the sentences, tokenize the text, and set the labels"""
# The "inputs" are the tokenized answer:
inputs = [prefix + doc for doc in examples["inputs"]]
model_inputs = tokenizer(inputs, max_length=128, truncation=True)
# The "labels" are the tokenized outputs:
labels = tokenizer(text_target=examples["targets"], max_length=512, truncation=True)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
# Map the preprocessing function across our dataset
tokenized_dataset = dataset.map(preprocess_function, batched=True)
# Set up Rouge score for evaluation
nltk.download("punkt", quiet=True)
metric = evaluate.load("rouge")
def compute_metrics(eval_preds):
preds, labels = eval_preds
# decode preds and labels
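    # -100 marks positions ignored by the loss (set by DataCollatorForSeq2Seq); swap them back to pad tokens before decoding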
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
# rougeLSum expects newline after each sentence
decoded_preds = ["\n".join(nltk.sent_tokenize(pred.strip())) for pred in decoded_preds]
decoded_labels = ["\n".join(nltk.sent_tokenize(label.strip())) for label in decoded_labels]
result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
return result
# Set up training arguments
training_args = Seq2SeqTrainingArguments(
output_dir="./results",
evaluation_strategy="epoch",
learning_rate=3e-4,
per_device_train_batch_size=8,
per_device_eval_batch_size=4,
weight_decay=0.01,
save_total_limit=3,
num_train_epochs=2,
predict_with_generate=True,
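    # generate full sequences during evaluation so compute_metrics receives token ids rather than raw logits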
push_to_hub=False
)
# Set up trainer
trainer = Seq2SeqTrainer(
model=model,
args=training_args,
train_dataset=tokenized_dataset["train"],
eval_dataset=tokenized_dataset["test"],
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics
)
# Train the model
trainer.train()
# Push to HF :)
trainer.push_to_hub() | ajsbsd/presto | [
"language:en",
"license:cc-by-4.0",
"region:us"
]
| 2023-11-09T23:31:43+00:00 | {"language": ["en"], "license": "cc-by-4.0", "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "metadata", "struct": [{"name": "locale", "dtype": "string"}, {"name": "example_id", "dtype": "string"}, {"name": "seeded_lists", "list": [{"name": "name", "dtype": "string"}, {"name": "items", "sequence": "string"}]}, {"name": "seeded_notes", "list": [{"name": "name", "dtype": "string"}, {"name": "content", "dtype": "string"}]}, {"name": "seeded_contacts", "sequence": "string"}, {"name": "previous_turns", "list": [{"name": "user_query", "dtype": "string"}, {"name": "response_text", "dtype": "string"}]}, {"name": "linguistic_phenomena", "dtype": "string"}, {"name": "split", "dtype": "string"}, {"name": "context", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 24777921, "num_examples": 33577}], "download_size": 6999588, "dataset_size": 24777921}} | 2023-11-09T23:57:46+00:00 | []
| [
"en"
]
| TAGS
#language-English #license-cc-by-4.0 #region-us
|
Code to test on Colab
!pip install -q transformers[torch] tokenizers datasets evaluate rouge_score sentencepiece huggingface_hub --upgrade
from huggingface_hub import notebook_login
notebook_login()
import nltk
from datasets import load_dataset
import evaluate
import numpy as np
from transformers import T5Tokenizer, DataCollatorForSeq2Seq
from transformers import T5ForConditionalGeneration, Seq2SeqTrainingArguments, Seq2SeqTrainer
# Load and split the dataset
dataset = load_dataset("ajsbsd/presto")
dataset = dataset["train"].train_test_split(test_size=0.2)
#dataset = load_dataset("csv", data_files="./JEOPARDY_CSV.csv")
#dataset = dataset["train"].train_test_split(test_size=0.2)
# Load the tokenizer, model, and data collator
tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-small")
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-small")
data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model)
# We prefix our tasks with "answer the question"
prefix = "answer the question: "
# Define our preprocessing function
def preprocess_function(examples):
"""Add prefix to the sentences, tokenize the text, and set the labels"""
# The "inputs" are the tokenized answer:
inputs = [prefix + doc for doc in examples["inputs"]]
model_inputs = tokenizer(inputs, max_length=128, truncation=True)
# The "labels" are the tokenized outputs:
labels = tokenizer(text_target=examples["targets"], max_length=512, truncation=True)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
# Map the preprocessing function across our dataset
tokenized_dataset = URL(preprocess_function, batched=True)
# Set up Rouge score for evaluation
nltk.download("punkt", quiet=True)
metric = URL("rouge")
def compute_metrics(eval_preds):
preds, labels = eval_preds
# decode preds and labels
labels = URL(labels != -100, labels, tokenizer.pad_token_id)
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
# rougeLSum expects newline after each sentence
decoded_preds = ["\n".join(nltk.sent_tokenize(URL())) for pred in decoded_preds]
decoded_labels = ["\n".join(nltk.sent_tokenize(URL())) for label in decoded_labels]
result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
return result
# Set up training arguments
training_args = Seq2SeqTrainingArguments(
output_dir="./results",
evaluation_strategy="epoch",
learning_rate=3e-4,
per_device_train_batch_size=8,
per_device_eval_batch_size=4,
weight_decay=0.01,
save_total_limit=3,
num_train_epochs=2,
predict_with_generate=True,
push_to_hub=False
)
# Set up trainer
trainer = Seq2SeqTrainer(
model=model,
args=training_args,
train_dataset=tokenized_dataset["train"],
eval_dataset=tokenized_dataset["test"],
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics
)
# Train the model
URL()
# Push to HF :)
trainer.push_to_hub() | [
"# Load and split the dataset\ndataset = load_dataset(\"ajsbsd/presto\")\ndataset = dataset[\"train\"].train_test_split(test_size=0.2)",
"# Load the tokenizer, model, and data collator\ntokenizer = T5Tokenizer.from_pretrained(\"google/flan-t5-small\")\nmodel = T5ForConditionalGeneration.from_pretrained(\"google/flan-t5-small\")\ndata_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model)",
"# We prefix our tasks with \"answer the question\"\nprefix = \"answer the question: \"",
"# Define our preprocessing function\ndef preprocess_function(examples):\n \"\"\"Add prefix to the sentences, tokenize the text, and set the labels\"\"\"\n # The \"inputs\" are the tokenized answer:\n inputs = [prefix + doc for doc in examples[\"inputs\"]]\n model_inputs = tokenizer(inputs, max_length=128, truncation=True)\n\n # The \"labels\" are the tokenized outputs:\n labels = tokenizer(text_target=examples[\"targets\"], max_length=512, truncation=True)\n model_inputs[\"labels\"] = labels[\"input_ids\"]\n return model_inputs",
"# Map the preprocessing function across our dataset\ntokenized_dataset = URL(preprocess_function, batched=True)",
"# Set up Rouge score for evaluation\nnltk.download(\"punkt\", quiet=True)\nmetric = URL(\"rouge\")\n\ndef compute_metrics(eval_preds):\n preds, labels = eval_preds\n\n # decode preds and labels\n labels = URL(labels != -100, labels, tokenizer.pad_token_id)\n decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)\n decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)\n\n # rougeLSum expects newline after each sentence\n decoded_preds = [\"\\n\".join(nltk.sent_tokenize(URL())) for pred in decoded_preds]\n decoded_labels = [\"\\n\".join(nltk.sent_tokenize(URL())) for label in decoded_labels]\n\n result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)\n return result",
"# Set up training arguments\ntraining_args = Seq2SeqTrainingArguments(\n output_dir=\"./results\",\n evaluation_strategy=\"epoch\",\n learning_rate=3e-4,\n per_device_train_batch_size=8,\n per_device_eval_batch_size=4,\n weight_decay=0.01,\n save_total_limit=3,\n num_train_epochs=2,\n predict_with_generate=True,\n push_to_hub=False\n)",
"# Set up trainer\ntrainer = Seq2SeqTrainer(\n model=model,\n args=training_args,\n train_dataset=tokenized_dataset[\"train\"],\n eval_dataset=tokenized_dataset[\"test\"],\n tokenizer=tokenizer,\n data_collator=data_collator,\n compute_metrics=compute_metrics\n)",
"# Train the model\nURL()",
"# Push to HF :)\ntrainer.push_to_hub()"
]
| [
"TAGS\n#language-English #license-cc-by-4.0 #region-us \n",
"# Load and split the dataset\ndataset = load_dataset(\"ajsbsd/presto\")\ndataset = dataset[\"train\"].train_test_split(test_size=0.2)",
"# Load the tokenizer, model, and data collator\ntokenizer = T5Tokenizer.from_pretrained(\"google/flan-t5-small\")\nmodel = T5ForConditionalGeneration.from_pretrained(\"google/flan-t5-small\")\ndata_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model)",
"# We prefix our tasks with \"answer the question\"\nprefix = \"answer the question: \"",
"# Define our preprocessing function\ndef preprocess_function(examples):\n \"\"\"Add prefix to the sentences, tokenize the text, and set the labels\"\"\"\n # The \"inputs\" are the tokenized answer:\n inputs = [prefix + doc for doc in examples[\"inputs\"]]\n model_inputs = tokenizer(inputs, max_length=128, truncation=True)\n\n # The \"labels\" are the tokenized outputs:\n labels = tokenizer(text_target=examples[\"targets\"], max_length=512, truncation=True)\n model_inputs[\"labels\"] = labels[\"input_ids\"]\n return model_inputs",
"# Map the preprocessing function across our dataset\ntokenized_dataset = URL(preprocess_function, batched=True)",
"# Set up Rouge score for evaluation\nnltk.download(\"punkt\", quiet=True)\nmetric = URL(\"rouge\")\n\ndef compute_metrics(eval_preds):\n preds, labels = eval_preds\n\n # decode preds and labels\n labels = URL(labels != -100, labels, tokenizer.pad_token_id)\n decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)\n decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)\n\n # rougeLSum expects newline after each sentence\n decoded_preds = [\"\\n\".join(nltk.sent_tokenize(URL())) for pred in decoded_preds]\n decoded_labels = [\"\\n\".join(nltk.sent_tokenize(URL())) for label in decoded_labels]\n\n result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)\n return result",
"# Set up training arguments\ntraining_args = Seq2SeqTrainingArguments(\n output_dir=\"./results\",\n evaluation_strategy=\"epoch\",\n learning_rate=3e-4,\n per_device_train_batch_size=8,\n per_device_eval_batch_size=4,\n weight_decay=0.01,\n save_total_limit=3,\n num_train_epochs=2,\n predict_with_generate=True,\n push_to_hub=False\n)",
"# Set up trainer\ntrainer = Seq2SeqTrainer(\n model=model,\n args=training_args,\n train_dataset=tokenized_dataset[\"train\"],\n eval_dataset=tokenized_dataset[\"test\"],\n tokenizer=tokenizer,\n data_collator=data_collator,\n compute_metrics=compute_metrics\n)",
"# Train the model\nURL()",
"# Push to HF :)\ntrainer.push_to_hub()"
]
| [
19,
51,
95,
22,
187,
32,
279,
122,
92,
7,
16
]
| [
"passage: TAGS\n#language-English #license-cc-by-4.0 #region-us \n# Load and split the dataset\ndataset = load_dataset(\"ajsbsd/presto\")\ndataset = dataset[\"train\"].train_test_split(test_size=0.2)# Load the tokenizer, model, and data collator\ntokenizer = T5Tokenizer.from_pretrained(\"google/flan-t5-small\")\nmodel = T5ForConditionalGeneration.from_pretrained(\"google/flan-t5-small\")\ndata_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model)# We prefix our tasks with \"answer the question\"\nprefix = \"answer the question: \"# Define our preprocessing function\ndef preprocess_function(examples):\n \"\"\"Add prefix to the sentences, tokenize the text, and set the labels\"\"\"\n # The \"inputs\" are the tokenized answer:\n inputs = [prefix + doc for doc in examples[\"inputs\"]]\n model_inputs = tokenizer(inputs, max_length=128, truncation=True)\n\n # The \"labels\" are the tokenized outputs:\n labels = tokenizer(text_target=examples[\"targets\"], max_length=512, truncation=True)\n model_inputs[\"labels\"] = labels[\"input_ids\"]\n return model_inputs# Map the preprocessing function across our dataset\ntokenized_dataset = URL(preprocess_function, batched=True)"
]
|
0698d61b4a6bd6b695b89f321286598ddbc59ba2 | # Dataset Card for "dataset_2000_decompese_question_3"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | presencesw/dataset_2000_decompese_question_3 | [
"region:us"
]
| 2023-11-09T23:35:42+00:00 | {"dataset_info": {"features": [{"name": "entities", "sequence": "null"}, {"name": "triplets", "list": [{"name": "question", "dtype": "string"}]}, {"name": "answer", "dtype": "string"}, {"name": "complex_question", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 70373, "num_examples": 199}], "download_size": 27081, "dataset_size": 70373}} | 2023-11-09T23:35:43+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "dataset_2000_decompese_question_3"
More Information needed | [
"# Dataset Card for \"dataset_2000_decompese_question_3\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"dataset_2000_decompese_question_3\"\n\nMore Information needed"
]
| [
6,
23
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"dataset_2000_decompese_question_3\"\n\nMore Information needed"
]
|
5b274f8bd050ecf3051b7c93328508b85172f645 | # Dataset Card for "donut_employee_info_v1"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | accavdar/donut_employee_info_v1 | [
"region:us"
]
| 2023-11-09T23:52:48+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "ground_truth", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 6186374.0, "num_examples": 80}, {"name": "test", "num_bytes": 1540228.0, "num_examples": 20}], "download_size": 6685265, "dataset_size": 7726602.0}} | 2023-11-09T23:52:57+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "donut_employee_info_v1"
More Information needed | [
"# Dataset Card for \"donut_employee_info_v1\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"donut_employee_info_v1\"\n\nMore Information needed"
]
| [
6,
21
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"donut_employee_info_v1\"\n\nMore Information needed"
]
|
fea6f4f66c92e54b8a8ff8a44e1b3b203cb65817 | # Dataset Card for "70a3f689"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | result-kand2-sdxl-wuerst-karlo/70a3f689 | [
"region:us"
]
| 2023-11-10T00:19:19+00:00 | {"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 163, "num_examples": 10}], "download_size": 1345, "dataset_size": 163}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-10T00:19:20+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "70a3f689"
More Information needed | [
"# Dataset Card for \"70a3f689\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"70a3f689\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"70a3f689\"\n\nMore Information needed"
]
|
8292b7ad40e76779c39035ae04f4182ae4d6118f | # Dataset Card for "ae385042"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | result-kand2-sdxl-wuerst-karlo/ae385042 | [
"region:us"
]
| 2023-11-10T00:22:08+00:00 | {"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 161, "num_examples": 10}], "download_size": 1340, "dataset_size": 161}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-10T00:22:08+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "ae385042"
More Information needed | [
"# Dataset Card for \"ae385042\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"ae385042\"\n\nMore Information needed"
]
| [
6,
15
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"ae385042\"\n\nMore Information needed"
]
|
9ea7e54ad978ac19ec07489ab8c8e968667cc502 | # Dataset Card for "wikilingua_data-xlsum_temario_results"
rouge= {'rouge1': 0.22676756630166944, 'rouge2': 0.05733749409742467, 'rougeL': 0.14739216031183608, 'rougeLsum': 0.14739216031183608}
bert= {'precision': 0.6762088215285404, 'recall': 0.7127016072322895, 'f1': 0.6928288537413521}
mover=0.5831551191071093 | arthurmluz/wikilingua_data-xlsum_temario_results | [
"region:us"
]
| 2023-11-10T00:33:08+00:00 | {"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 24426752, "num_examples": 8165}], "download_size": 14578091, "dataset_size": 24426752}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]} | 2023-11-13T19:51:19+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "wikilingua_data-xlsum_temario_results"
rouge= {'rouge1': 0.22676756630166944, 'rouge2': 0.05733749409742467, 'rougeL': 0.14739216031183608, 'rougeLsum': 0.14739216031183608}
bert= {'precision': 0.6762088215285404, 'recall': 0.7127016072322895, 'f1': 0.6928288537413521}
mover=0.5831551191071093 | [
"# Dataset Card for \"wikilingua_data-xlsum_temario_results\"\n\nrouge= {'rouge1': 0.22676756630166944, 'rouge2': 0.05733749409742467, 'rougeL': 0.14739216031183608, 'rougeLsum': 0.14739216031183608}\n\nbert= {'precision': 0.6762088215285404, 'recall': 0.7127016072322895, 'f1': 0.6928288537413521}\n\nmover=0.5831551191071093"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"wikilingua_data-xlsum_temario_results\"\n\nrouge= {'rouge1': 0.22676756630166944, 'rouge2': 0.05733749409742467, 'rougeL': 0.14739216031183608, 'rougeLsum': 0.14739216031183608}\n\nbert= {'precision': 0.6762088215285404, 'recall': 0.7127016072322895, 'f1': 0.6928288537413521}\n\nmover=0.5831551191071093"
]
| [
6,
139
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"wikilingua_data-xlsum_temario_results\"\n\nrouge= {'rouge1': 0.22676756630166944, 'rouge2': 0.05733749409742467, 'rougeL': 0.14739216031183608, 'rougeLsum': 0.14739216031183608}\n\nbert= {'precision': 0.6762088215285404, 'recall': 0.7127016072322895, 'f1': 0.6928288537413521}\n\nmover=0.5831551191071093"
]
|
f4bfad77f0f35aa01571f2b9a075bab8ca6a78a6 | # Dataset Card for "imdb-card-pred-decimal"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | vic0428/imdb-card-pred-decimal | [
"region:us"
]
| 2023-11-10T01:06:42+00:00 | {"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "prompt", "dtype": "string"}, {"name": "true_cardinality", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 39101954.4, "num_examples": 80000}, {"name": "test", "num_bytes": 9775488.6, "num_examples": 20000}], "download_size": 8384711, "dataset_size": 48877443.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]} | 2023-11-18T06:20:19+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "imdb-card-pred-decimal"
More Information needed | [
"# Dataset Card for \"imdb-card-pred-decimal\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"imdb-card-pred-decimal\"\n\nMore Information needed"
]
| [
6,
19
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"imdb-card-pred-decimal\"\n\nMore Information needed"
]
|
8604aa1602a73e15300ab72797a0e102ddaa22e8 | # Dataset Card for "imdb-card-pred-binary"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | vic0428/imdb-card-pred-binary | [
"region:us"
]
| 2023-11-10T01:11:50+00:00 | {"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "prompt", "dtype": "string"}, {"name": "true_cardinality", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 40068212.8, "num_examples": 80000}, {"name": "test", "num_bytes": 10017053.2, "num_examples": 20000}], "download_size": 8595296, "dataset_size": 50085266.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]} | 2023-11-18T06:20:09+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "imdb-card-pred-binary"
More Information needed | [
"# Dataset Card for \"imdb-card-pred-binary\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"imdb-card-pred-binary\"\n\nMore Information needed"
]
| [
6,
19
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"imdb-card-pred-binary\"\n\nMore Information needed"
]
|
e2e3da4b55e66195aaacb5342c3994e6eb6ba8aa | # Dataset Card for "imdb-card-pred-science"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | vic0428/imdb-card-pred-science | [
"region:us"
]
| 2023-11-10T01:11:52+00:00 | {"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "prompt", "dtype": "string"}, {"name": "true_cardinality", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 39344995.2, "num_examples": 80000}, {"name": "test", "num_bytes": 9836248.8, "num_examples": 20000}], "download_size": 8632280, "dataset_size": 49181244.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]} | 2023-11-18T06:20:28+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "imdb-card-pred-science"
More Information needed | [
"# Dataset Card for \"imdb-card-pred-science\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"imdb-card-pred-science\"\n\nMore Information needed"
]
| [
6,
18
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"imdb-card-pred-science\"\n\nMore Information needed"
]
|
508e19a752399c05e62daf3c2cfd09b86fafc927 | # Dataset Card for "dataset_2000_decompese_question_4"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | presencesw/dataset_2000_decompese_question_4 | [
"region:us"
]
| 2023-11-10T01:25:07+00:00 | {"dataset_info": {"features": [{"name": "entities", "sequence": "null"}, {"name": "triplets", "list": [{"name": "question", "dtype": "string"}]}, {"name": "answer", "dtype": "string"}, {"name": "complex_question", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 70501, "num_examples": 199}], "download_size": 25954, "dataset_size": 70501}} | 2023-11-10T01:25:09+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "dataset_2000_decompese_question_4"
More Information needed | [
"# Dataset Card for \"dataset_2000_decompese_question_4\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"dataset_2000_decompese_question_4\"\n\nMore Information needed"
]
| [
6,
23
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"dataset_2000_decompese_question_4\"\n\nMore Information needed"
]
|
5a43188fdc818aa3d07cc5259d3611cfbf89e7c1 | # Dataset Card for "recipes"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | nlplabtdtu/recipes | [
"region:us"
]
| 2023-11-10T01:38:00+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "food", "dtype": "string"}, {"name": "ingredients", "dtype": "string"}, {"name": "recipe", "dtype": "string"}, {"name": "prompt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 403416.11312217196, "num_examples": 203}], "download_size": 190780, "dataset_size": 403416.11312217196}} | 2023-11-10T01:54:07+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "recipes"
More Information needed | [
"# Dataset Card for \"recipes\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"recipes\"\n\nMore Information needed"
]
| [
6,
13
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"recipes\"\n\nMore Information needed"
]
|
f642e53ae330822f90b338b68947f65887f3c9af | # Dataset Card for "fashionpedia"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | sandrocaseiro/fashionpedia | [
"region:us"
]
| 2023-11-10T01:49:27+00:00 | {"dataset_info": {"features": [{"name": "image_id", "dtype": "int64"}, {"name": "image", "dtype": "image"}, {"name": "width", "dtype": "int64"}, {"name": "height", "dtype": "int64"}, {"name": "objects", "struct": [{"name": "bbox_id", "sequence": "int64"}, {"name": "bbox", "sequence": {"sequence": "float64"}}, {"name": "category", "sequence": {"class_label": {"names": {"0": "shirt, blouse", "1": "top, t-shirt, sweatshirt", "2": "sweater", "3": "cardigan", "4": "jacket", "5": "vest", "6": "pants", "7": "shorts", "8": "skirt", "9": "coat", "10": "dress", "11": "jumpsuit", "12": "cape", "13": "glasses", "14": "hat", "15": "headband, head covering, hair accessory", "16": "tie", "17": "glove", "18": "watch", "19": "belt", "20": "leg warmer", "21": "tights, stockings", "22": "sock", "23": "shoe", "24": "bag, wallet", "25": "scarf", "26": "umbrella", "27": "hood", "28": "collar", "29": "lapel", "30": "epaulette", "31": "sleeve", "32": "pocket", "33": "neckline", "34": "buckle", "35": "zipper", "36": "applique", "37": "bead", "38": "bow", "39": "flower", "40": "fringe", "41": "ribbon", "42": "rivet", "43": "ruffle", "44": "sequin", "45": "tassel"}}}}, {"name": "area", "sequence": "int64"}, {"name": "segmentation", "sequence": {"sequence": {"sequence": "int64"}}}]}], "splits": [{"name": "train", "num_bytes": 3812764522.759, "num_examples": 45623}, {"name": "val", "num_bytes": 100185461.28, "num_examples": 1158}], "download_size": 3519915966, "dataset_size": 3912949984.039}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "val", "path": "data/val-*"}]}]} | 2023-11-10T18:49:14+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "fashionpedia"
More Information needed | [
"# Dataset Card for \"fashionpedia\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"fashionpedia\"\n\nMore Information needed"
]
| [
6,
12
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"fashionpedia\"\n\nMore Information needed"
]
|
737365da49abc0c4d43c0ede50025b532691703f | # Dataset Card for "cstnews_data-xlsum_temario_results"
rouge= {'rouge1': 0.47212177671448063, 'rouge2': 0.2811985678373053, 'rougeL': 0.348694400169423, 'rougeLsum': 0.348694400169423}
bert= {'precision': 0.7867038622498512, 'recall': 0.7567419111728668, 'f1': 0.7705440074205399}
mover = 0.6284352769565635 | arthurmluz/cstnews_data-xlsum_temario_results | [
"region:us"
]
| 2023-11-10T02:12:49+00:00 | {"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 56230, "num_examples": 16}], "download_size": 53610, "dataset_size": 56230}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]} | 2023-11-15T03:54:14+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "cstnews_data-xlsum_temario_results"
rouge= {'rouge1': 0.47212177671448063, 'rouge2': 0.2811985678373053, 'rougeL': 0.348694400169423, 'rougeLsum': 0.348694400169423}
bert= {'precision': 0.7867038622498512, 'recall': 0.7567419111728668, 'f1': 0.7705440074205399}
mover = 0.6284352769565635 | [
"# Dataset Card for \"cstnews_data-xlsum_temario_results\"\n\nrouge= {'rouge1': 0.47212177671448063, 'rouge2': 0.2811985678373053, 'rougeL': 0.348694400169423, 'rougeLsum': 0.348694400169423}\n\nbert= {'precision': 0.7867038622498512, 'recall': 0.7567419111728668, 'f1': 0.7705440074205399}\n\nmover = 0.6284352769565635"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"cstnews_data-xlsum_temario_results\"\n\nrouge= {'rouge1': 0.47212177671448063, 'rouge2': 0.2811985678373053, 'rougeL': 0.348694400169423, 'rougeLsum': 0.348694400169423}\n\nbert= {'precision': 0.7867038622498512, 'recall': 0.7567419111728668, 'f1': 0.7705440074205399}\n\nmover = 0.6284352769565635"
]
| [
6,
141
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"cstnews_data-xlsum_temario_results\"\n\nrouge= {'rouge1': 0.47212177671448063, 'rouge2': 0.2811985678373053, 'rougeL': 0.348694400169423, 'rougeLsum': 0.348694400169423}\n\nbert= {'precision': 0.7867038622498512, 'recall': 0.7567419111728668, 'f1': 0.7705440074205399}\n\nmover = 0.6284352769565635"
]
|
a833ca8794811a0bfeac5edd261f3f3137e2d7cf | # Dataset Card for "admits_fyi"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | chirunder/admits | [
"region:us"
]
| 2023-11-10T02:20:45+00:00 | {"dataset_info": {"features": [{"name": "gre_quant", "dtype": "int64"}, {"name": "gre_verbal", "dtype": "int64"}, {"name": "gre_awa", "dtype": "float64"}, {"name": "gre_total", "dtype": "int64"}, {"name": "toefl", "dtype": "int64"}, {"name": "year", "dtype": "int64"}, {"name": "term", "dtype": "string"}, {"name": "grade_scale", "dtype": "int64"}, {"name": "ielts", "dtype": "float64"}, {"name": "grade_score", "dtype": "float64"}, {"name": "undergrad_major", "dtype": "string"}, {"name": "undergrad_university", "dtype": "string"}, {"name": "admits", "sequence": "string"}, {"name": "rejects", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 89517, "num_examples": 320}], "download_size": 22092, "dataset_size": 89517}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-10T02:29:29+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "admits_fyi"
More Information needed | [
"# Dataset Card for \"admits_fyi\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"admits_fyi\"\n\nMore Information needed"
]
| [
6,
16
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"admits_fyi\"\n\nMore Information needed"
]
|
a5e5cd2a716431931d95090f76c04a8b6611d9aa | # Dataset Card for "GPTextSum_data-xlsum_temario_results"
rouge= {'rouge1': 0.41383134368121127, 'rouge2': 0.21155454924836806, 'rougeL': 0.32959975412911574, 'rougeLsum': 0.32959975412911574}
bert= {'precision': 0.7497879922389984, 'recall': 0.7460619449615479, 'f1': 0.7474037945270539} | arthurmluz/GPTextSum_data-xlsum_temario_results | [
"region:us"
]
| 2023-11-10T02:23:13+00:00 | {"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 28964, "num_examples": 20}], "download_size": 36328, "dataset_size": 28964}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]} | 2023-11-15T04:10:51+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "GPTextSum_data-xlsum_temario_results"
rouge= {'rouge1': 0.41383134368121127, 'rouge2': 0.21155454924836806, 'rougeL': 0.32959975412911574, 'rougeLsum': 0.32959975412911574}
bert= {'precision': 0.7497879922389984, 'recall': 0.7460619449615479, 'f1': 0.7474037945270539} | [
"# Dataset Card for \"GPTextSum_data-xlsum_temario_results\"\n\nrouge= {'rouge1': 0.41383134368121127, 'rouge2': 0.21155454924836806, 'rougeL': 0.32959975412911574, 'rougeLsum': 0.32959975412911574}\n\nbert= {'precision': 0.7497879922389984, 'recall': 0.7460619449615479, 'f1': 0.7474037945270539}"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"GPTextSum_data-xlsum_temario_results\"\n\nrouge= {'rouge1': 0.41383134368121127, 'rouge2': 0.21155454924836806, 'rougeL': 0.32959975412911574, 'rougeLsum': 0.32959975412911574}\n\nbert= {'precision': 0.7497879922389984, 'recall': 0.7460619449615479, 'f1': 0.7474037945270539}"
]
| [
6,
132
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"GPTextSum_data-xlsum_temario_results\"\n\nrouge= {'rouge1': 0.41383134368121127, 'rouge2': 0.21155454924836806, 'rougeL': 0.32959975412911574, 'rougeLsum': 0.32959975412911574}\n\nbert= {'precision': 0.7497879922389984, 'recall': 0.7460619449615479, 'f1': 0.7474037945270539}"
]
|
1e23789be5a004293041852bc2e4f07ffd24f105 | # Dataset Card for "zaloai-2023-nlp-test-vi"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | vinhtran2611/zaloai-2023-nlp-test-vi | [
"region:us"
]
| 2023-11-10T02:24:25+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "choices", "sequence": "string"}, {"name": "explanation", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "answer", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 303871, "num_examples": 1200}], "download_size": 154908, "dataset_size": 303871}} | 2023-11-10T02:24:59+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "zaloai-2023-nlp-test-vi"
More Information needed | [
"# Dataset Card for \"zaloai-2023-nlp-test-vi\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"zaloai-2023-nlp-test-vi\"\n\nMore Information needed"
]
| [
6,
22
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"zaloai-2023-nlp-test-vi\"\n\nMore Information needed"
]
|
d045ded991af61f8dd9279f03446cf4a1fd51e6c | # Dataset Card for "temario_data-xlsum_temario_results"
rouge= {'rouge1': 0.4645430247612985, 'rouge2': 0.20047832480264657, 'rougeL': 0.28643965943084165, 'rougeLsum': 0.28643965943084165}
bert= {'precision': 0.7404904127120971, 'recall': 0.7317324829101562, 'f1': 0.7355524206161499}
mover = 0.6322561420505246 | arthurmluz/temario_data-xlsum_temario_results | [
"region:us"
]
| 2023-11-10T02:29:09+00:00 | {"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 231919, "num_examples": 25}], "download_size": 187834, "dataset_size": 231919}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]} | 2023-11-15T03:31:07+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "temario_data-xlsum_temario_results"
rouge= {'rouge1': 0.4645430247612985, 'rouge2': 0.20047832480264657, 'rougeL': 0.28643965943084165, 'rougeLsum': 0.28643965943084165}
bert= {'precision': 0.7404904127120971, 'recall': 0.7317324829101562, 'f1': 0.7355524206161499}
mover = 0.6322561420505246 | [
"# Dataset Card for \"temario_data-xlsum_temario_results\"\n\nrouge= {'rouge1': 0.4645430247612985, 'rouge2': 0.20047832480264657, 'rougeL': 0.28643965943084165, 'rougeLsum': 0.28643965943084165}\n\nbert= {'precision': 0.7404904127120971, 'recall': 0.7317324829101562, 'f1': 0.7355524206161499}\n\nmover = 0.6322561420505246"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"temario_data-xlsum_temario_results\"\n\nrouge= {'rouge1': 0.4645430247612985, 'rouge2': 0.20047832480264657, 'rougeL': 0.28643965943084165, 'rougeLsum': 0.28643965943084165}\n\nbert= {'precision': 0.7404904127120971, 'recall': 0.7317324829101562, 'f1': 0.7355524206161499}\n\nmover = 0.6322561420505246"
]
| [
6,
144
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"temario_data-xlsum_temario_results\"\n\nrouge= {'rouge1': 0.4645430247612985, 'rouge2': 0.20047832480264657, 'rougeL': 0.28643965943084165, 'rougeLsum': 0.28643965943084165}\n\nbert= {'precision': 0.7404904127120971, 'recall': 0.7317324829101562, 'f1': 0.7355524206161499}\n\nmover = 0.6322561420505246"
]
|
5f98d80c9ca7ba7b614540a5a565d495aa7c2a9b |
# Dataset Card for Unsupervised Peoples Speech
## Table of Contents
- [Dataset Card for Unsupervised Peoples Speech](#dataset-card-for-unsupervised-peoples-speech)
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Relevant Statistics](#relevant-statistics)
- [Dataset Structure](#dataset-structure)
- [Audio folders](#audio-folders)
- [Dataset Creation](#dataset-creation)
- [Source Data](#source-data)
- [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
- [Preprocessing](#preprocessing)
- [Annotations](#annotations)
- [Annotation Process](#annotation-process)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Discussion of Biases](#discussion-of-biases)
- [Additional Information](#additional-information)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
## Dataset Description
- **Point of Contact:** [[email protected]](mailto:[email protected])
### Dataset Summary
The Unsupervised Peoples Speech Dataset is a compilation of audio files extracted from Archive.org that are licensed for academic and commercial usage under CC-BY and CC-BY-SA licenses. It includes more than one million hours of audio with a diverse set of speakers.
### Relevant Statistics
#### Duration Distribution
Most of the audio files range between 1 and 10 minutes in length, with only 14 of them exceeding the 100-hour mark.

#### Sample Rates
99% of the audio in the dataset has a 44.1 kHz sample rate, and the remaining audio varies from the more common 16 kHz, 24 kHz, and 48 kHz to custom sample rates.

## Dataset Structure
### Audio folders
Folders with the raw audio. We split this into two directories because Hugging Face does not support more than 10,000 files in a single directory.
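As a hedged illustration, the files in these directories can be enumerated with `huggingface_hub`; the audio extensions filtered for below are assumptions, and listing a repository of this size may take a while:

```python
# Hedged sketch: enumerate the raw audio files stored in this dataset repository.
from huggingface_hub import list_repo_files

files = list_repo_files("MLCommons/unsupervised_peoples_speech", repo_type="dataset")
audio_files = [f for f in files if f.lower().endswith((".mp3", ".flac", ".wav", ".ogg"))]  # assumed extensions
print(f"{len(audio_files)} audio files found across the audio directories")
```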
## Dataset Creation
### Source Data
#### Initial Data Collection and Normalization
Data was downloaded via the archive.org API. No data inference was done.
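As a hedged sketch of this kind of crawl, the `internetarchive` Python client can search and download items; the license query and file pattern below are illustrative assumptions, not the exact parameters used to build this dataset:

```python
# Illustrative archive.org crawl; the query string and glob pattern are assumptions.
from internetarchive import search_items, download

query = "mediatype:audio AND licenseurl:*creativecommons.org/licenses/by-sa*"
for result in search_items(query):
    identifier = result["identifier"]
    download(identifier, glob_pattern="*.mp3", destdir="raw_audio", verbose=True)
    break  # remove this break to crawl the full result set
```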
#### Preprocessing
No preprocessing was done.
### Annotations
#### Annotation process
No manual annotation is done. We download only source audio.
In particular, there is no "forced alignment" or "segmentation" done on this dataset.
### Personal and Sensitive Information
Several of our sources are legal and government proceedings, spoken stories, speeches, and so on. Given that these were intended as public documents and licensed as such, it is natural that the involved individuals are aware of this.
## Considerations for Using the Data
### Discussion of Biases
Our data is downloaded from archive.org. As such, the data is biased towards whatever users decide to upload there.
Almost all of our data is American accented English.
## Additional Information
### Licensing Information
The source data contains data under CC-BY-SA and CC-BY licenses.
We license this dataset under https://creativecommons.org/licenses/by-sa/4.0/
### Citation Information
Please cite
```
@article{USP,
author={Daniel Galvez and
Ryan Hileman and
Rafael Mosquera and
Juan Ciro and
Kurt Bollacker and
Peter Mattson and
David Kanter},
title = {Unsupervised People's Speech (The Million Hour Audio Dataset)},
year = {2023},
url = {https://huggingface.co/datasets/MLCommons/peoples_speech},
}
``` | MLCommons/unsupervised_peoples_speech | [
"task_categories:automatic-speech-recognition",
"task_categories:audio-classification",
"task_ids:audio-language-identification",
"language:eng",
"audio",
"unsupervised",
"region:us"
]
| 2023-11-10T02:40:09+00:00 | {"language": ["eng"], "task_categories": ["automatic-speech-recognition", "audio-classification"], "task_ids": ["audio-language-identification"], "pretty_name": "Unsupervised Peoples Speech", "tags": ["audio", "unsupervised"], "viewer": false} | 2023-11-28T17:57:53+00:00 | []
| [
"eng"
]
| TAGS
#task_categories-automatic-speech-recognition #task_categories-audio-classification #task_ids-audio-language-identification #language-English #audio #unsupervised #region-us
|
# Dataset Card for Unsupervised Peoples Speech
## Table of Contents
- Dataset Card for Unuspervised Peoples Speech
- Table of Contents
- Dataset Description
- Dataset Summary
- Relevant Statistics
- Dataset Structure
- Audio folders
- Dataset Creation
- Source Data
- Initial Data Collection and Normalization
- Preprocessing
- [Annotations] (#annotations)
- [Annotation Process] (#annotation-process)
- Considerations for Using the Data
- Discussion of Biases
- Additional Information
- Licensing Information
- Citation Information
## Dataset Description
- Point of Contact: datasets@URL
### Dataset Summary
The Unsupervised Peoples Speech Dataset is a compilation of audiofiles extracted from URL that is licensed for academic and commercial usage under CC-BY and CC-BY-SA licenses. It includes more than one million hours of audio with a diverse set of speakers.
### Relevant Statistics
#### Duration Distribution
Most of the audios range between 1 and 10 minutes in length, with only 14 of them exceeding the 100 hour mark.
!Duration Distribution
#### Sample Rates
99% of the audio in the dataset has a 44.1Khz sample rate, and the remaining audio varies from the more common 16Khz, 24Khz and 48 Khz to custom sample rates.
!Sample Rates
## Dataset Structure
### Audio folders
Folders with the raw audio. We split this into two directories because Hugging Face does not support more than 10,000 files in a single directory.
## Dataset Creation
### Source Data
#### Initial Data Collection and Normalization
Data was downloaded via the URL API. No data inference was done.
#### Preprocessing
No preprocessing was done.
### Annotations
#### Annotation process
No manual annotation is done. We download only source audio.
In particular, there is no "forced alignment" or "segmentation" done on this dataset.
### Personal and Sensitive Information
Several of our sources are legal and government proceedings, spoken stories, speeches, and so on. Given that these were intended as public documents and licensed as such, it is natural that the involved individuals are aware of this.
## Considerations for Using the Data
### Discussion of Biases
Our data is downloaded from URL. As such, the data is biased towards whatever users decide to upload there.
Almost all of our data is American accented English.
## Additional Information
### Licensing Information
The source data contains data under CC-BY-SA and CC-BY licenses.
We license this dataset under URL
Please cite
| [
"# Dataset Card for Unsupervised Peoples Speech",
"## Table of Contents\n- Dataset Card for Unuspervised Peoples Speech\n - Table of Contents\n - Dataset Description\n - Dataset Summary\n - Relevant Statistics\n - Dataset Structure\n - Audio folders\n - Dataset Creation\n - Source Data\n - Initial Data Collection and Normalization\n - Preprocessing\n - [Annotations] (#annotations)\n - [Annotation Process] (#annotation-process)\n - Considerations for Using the Data\n - Discussion of Biases\n - Additional Information\n - Licensing Information\n - Citation Information",
"## Dataset Description\n\n- Point of Contact: datasets@URL",
"### Dataset Summary\n\nThe Unsupervised Peoples Speech Dataset is a compilation of audiofiles extracted from URL that is licensed for academic and commercial usage under CC-BY and CC-BY-SA licenses. It includes more than one million hours of audio with a diverse set of speakers.",
"### Relevant Statistics",
"#### Duration Distribution\n\nMost of the audios range between 1 and 10 minutes in length, with only 14 of them exceeding the 100 hour mark.\n\n!Duration Distribution",
"#### Sample Rates\n\n99% of the audio in the dataset has a 44.1Khz sample rate, and the remaining audio varies from the more common 16Khz, 24Khz and 48 Khz to custom sample rates.\n\n!Sample Rates",
"## Dataset Structure",
"### Audio folders\n\nFolders with the raw audio. We split this into two directories because Hugging Face does not support more than 10,000 files in a single directory.",
"## Dataset Creation",
"### Source Data",
"#### Initial Data Collection and Normalization\n\nData was downloaded via the URL API. No data inference was done.",
"#### Preprocessing\n\nNo preprocessing was done.",
"### Annotations",
"#### Annotation process\n\nNo manual annotation is done. We download only source audio.\n\nIn particular, there is no \"forced alignment\" or \"segmentation\" done on this dataset.",
"### Personal and Sensitive Information\n\nSeveral of our sources are legal and government proceedings, spoken stories, speeches, and so on. Given that these were intended as public documents and licensed as such, it is natural that the involved individuals are aware of this.",
"## Considerations for Using the Data",
"### Discussion of Biases\n\nOur data is downloaded from URL. As such, the data is biased towards whatever users decide to upload there.\n\nAlmost all of our data is American accented English.",
"## Additional Information",
"### Licensing Information\n\nThe source data contains data under CC-BY-SA and CC-BY licenses. \n\nWe license this dataset under URL\n\n\nPlease cite"
]
| [
"TAGS\n#task_categories-automatic-speech-recognition #task_categories-audio-classification #task_ids-audio-language-identification #language-English #audio #unsupervised #region-us \n",
"# Dataset Card for Unsupervised Peoples Speech",
"## Table of Contents\n- Dataset Card for Unuspervised Peoples Speech\n - Table of Contents\n - Dataset Description\n - Dataset Summary\n - Relevant Statistics\n - Dataset Structure\n - Audio folders\n - Dataset Creation\n - Source Data\n - Initial Data Collection and Normalization\n - Preprocessing\n - [Annotations] (#annotations)\n - [Annotation Process] (#annotation-process)\n - Considerations for Using the Data\n - Discussion of Biases\n - Additional Information\n - Licensing Information\n - Citation Information",
"## Dataset Description\n\n- Point of Contact: datasets@URL",
"### Dataset Summary\n\nThe Unsupervised Peoples Speech Dataset is a compilation of audiofiles extracted from URL that is licensed for academic and commercial usage under CC-BY and CC-BY-SA licenses. It includes more than one million hours of audio with a diverse set of speakers.",
"### Relevant Statistics",
"#### Duration Distribution\n\nMost of the audios range between 1 and 10 minutes in length, with only 14 of them exceeding the 100 hour mark.\n\n!Duration Distribution",
"#### Sample Rates\n\n99% of the audio in the dataset has a 44.1Khz sample rate, and the remaining audio varies from the more common 16Khz, 24Khz and 48 Khz to custom sample rates.\n\n!Sample Rates",
"## Dataset Structure",
"### Audio folders\n\nFolders with the raw audio. We split this into two directories because Hugging Face does not support more than 10,000 files in a single directory.",
"## Dataset Creation",
"### Source Data",
"#### Initial Data Collection and Normalization\n\nData was downloaded via the URL API. No data inference was done.",
"#### Preprocessing\n\nNo preprocessing was done.",
"### Annotations",
"#### Annotation process\n\nNo manual annotation is done. We download only source audio.\n\nIn particular, there is no \"forced alignment\" or \"segmentation\" done on this dataset.",
"### Personal and Sensitive Information\n\nSeveral of our sources are legal and government proceedings, spoken stories, speeches, and so on. Given that these were intended as public documents and licensed as such, it is natural that the involved individuals are aware of this.",
"## Considerations for Using the Data",
"### Discussion of Biases\n\nOur data is downloaded from URL. As such, the data is biased towards whatever users decide to upload there.\n\nAlmost all of our data is American accented English.",
"## Additional Information",
"### Licensing Information\n\nThe source data contains data under CC-BY-SA and CC-BY licenses. \n\nWe license this dataset under URL\n\n\nPlease cite"
]
| [
58,
12,
123,
14,
67,
7,
36,
58,
6,
36,
5,
4,
26,
12,
5,
43,
58,
8,
44,
5,
34
]
| [
"passage: TAGS\n#task_categories-automatic-speech-recognition #task_categories-audio-classification #task_ids-audio-language-identification #language-English #audio #unsupervised #region-us \n# Dataset Card for Unsupervised Peoples Speech## Table of Contents\n- Dataset Card for Unuspervised Peoples Speech\n - Table of Contents\n - Dataset Description\n - Dataset Summary\n - Relevant Statistics\n - Dataset Structure\n - Audio folders\n - Dataset Creation\n - Source Data\n - Initial Data Collection and Normalization\n - Preprocessing\n - [Annotations] (#annotations)\n - [Annotation Process] (#annotation-process)\n - Considerations for Using the Data\n - Discussion of Biases\n - Additional Information\n - Licensing Information\n - Citation Information## Dataset Description\n\n- Point of Contact: datasets@URL### Dataset Summary\n\nThe Unsupervised Peoples Speech Dataset is a compilation of audiofiles extracted from URL that is licensed for academic and commercial usage under CC-BY and CC-BY-SA licenses. It includes more than one million hours of audio with a diverse set of speakers.### Relevant Statistics#### Duration Distribution\n\nMost of the audios range between 1 and 10 minutes in length, with only 14 of them exceeding the 100 hour mark.\n\n!Duration Distribution#### Sample Rates\n\n99% of the audio in the dataset has a 44.1Khz sample rate, and the remaining audio varies from the more common 16Khz, 24Khz and 48 Khz to custom sample rates.\n\n!Sample Rates## Dataset Structure### Audio folders\n\nFolders with the raw audio. We split this into two directories because Hugging Face does not support more than 10,000 files in a single directory.## Dataset Creation### Source Data#### Initial Data Collection and Normalization\n\nData was downloaded via the URL API. No data inference was done.#### Preprocessing\n\nNo preprocessing was done.### Annotations"
]
|
ff797e5a1eb48a0c90459f82004e5d11d2d28a78 | # Dataset Card for "Extractive-QA-type-1"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | nlplabtdtu/Extractive-QA-type-1 | [
"region:us"
]
| 2023-11-10T02:40:44+00:00 | {"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answers", "struct": [{"name": "answer_start", "sequence": "int64"}, {"name": "text", "sequence": "string"}]}, {"name": "is_impossible", "dtype": "bool"}, {"name": "instruction", "dtype": "string"}, {"name": "prompt_name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 41943254, "num_examples": 19240}], "download_size": 8170598, "dataset_size": 41943254}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-10T02:40:46+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "Extractive-QA-type-1"
More Information needed | [
"# Dataset Card for \"Extractive-QA-type-1\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"Extractive-QA-type-1\"\n\nMore Information needed"
]
| [
6,
18
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"Extractive-QA-type-1\"\n\nMore Information needed"
]
|
5a18e6c9e995b5dc379c3393b8dcbed39490eb7b | # Dataset Card for "encodec_24khz-b24.0-librispeech_asr-features"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | cmu-mlsp/encodec_24khz-b24.0-librispeech_asr-features | [
"region:us"
]
| 2023-11-10T02:52:28+00:00 | {"dataset_info": {"features": [{"name": "file", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 24000}}}, {"name": "text", "dtype": "string"}, {"name": "speaker_id", "dtype": "int64"}, {"name": "chapter_id", "dtype": "int64"}, {"name": "id", "dtype": "string"}, {"name": "audio_codes", "sequence": "int64"}], "splits": [{"name": "test", "num_bytes": 935300282.5, "num_examples": 2620}], "download_size": 915321514, "dataset_size": 935300282.5}, "configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}]} | 2023-11-10T02:53:03+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "encodec_24khz-b24.0-librispeech_asr-features"
More Information needed | [
"# Dataset Card for \"encodec_24khz-b24.0-librispeech_asr-features\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"encodec_24khz-b24.0-librispeech_asr-features\"\n\nMore Information needed"
]
| [
6,
30
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"encodec_24khz-b24.0-librispeech_asr-features\"\n\nMore Information needed"
]
|
b10b33929217f997aa8700cf19569033ed2bdcec | # Dataset Card for "dataset_995_decompose_question"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | presencesw/dataset_995_decompose_question | [
"region:us"
]
| 2023-11-10T02:55:51+00:00 | {"dataset_info": {"features": [{"name": "entities", "sequence": "null"}, {"name": "triplets", "list": [{"name": "question", "dtype": "string"}]}, {"name": "answer", "dtype": "string"}, {"name": "complex_question", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 319598, "num_examples": 995}], "download_size": 120850, "dataset_size": 319598}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-10T02:55:52+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "dataset_995_decompose_question"
More Information needed | [
"# Dataset Card for \"dataset_995_decompose_question\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"dataset_995_decompose_question\"\n\nMore Information needed"
]
| [
6,
21
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"dataset_995_decompose_question\"\n\nMore Information needed"
]
|
13c44c3e58584ab770bf9a8f9c8835fa0b3d78d8 | # Dataset Card for "eye-gaze-dataset-test"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | Yacong/eye-gaze-dataset-test | [
"region:us"
]
| 2023-11-10T02:59:03+00:00 | {"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "conditioning_image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2841284.0, "num_examples": 5}], "download_size": 2818559, "dataset_size": 2841284.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-10T03:40:49+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "eye-gaze-dataset-test"
More Information needed | [
"# Dataset Card for \"eye-gaze-dataset-test\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"eye-gaze-dataset-test\"\n\nMore Information needed"
]
| [
6,
19
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"eye-gaze-dataset-test\"\n\nMore Information needed"
]
|
5ca5fe074b2d8336982d09c2f9e1620fe3a62ecc | # Dataset Card for "share_gpt_filtered"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | Phando/share_gpt_filtered | [
"region:us"
]
| 2023-11-10T03:10:51+00:00 | {"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "conversations", "list": [{"name": "from", "dtype": "string"}, {"name": "markdown", "struct": [{"name": "answer", "dtype": "string"}, {"name": "index", "dtype": "int64"}, {"name": "type", "dtype": "string"}]}, {"name": "text", "dtype": "string"}, {"name": "value", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 633293002, "num_examples": 94145}], "download_size": 234243543, "dataset_size": 633293002}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-10T03:11:50+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "share_gpt_filtered"
More Information needed | [
"# Dataset Card for \"share_gpt_filtered\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"share_gpt_filtered\"\n\nMore Information needed"
]
| [
6,
17
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"share_gpt_filtered\"\n\nMore Information needed"
]
|
c51d8a147bcf29b01a200b95c4ad39a2253d0133 | # Dataset Card for "test_ds"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | Back-up/test_ds | [
"region:us"
]
| 2023-11-10T03:29:26+00:00 | {"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "response", "struct": [{"name": "response", "dtype": "string"}]}, {"name": "answers", "struct": [{"name": "answer_start", "sequence": "int64"}, {"name": "text", "sequence": "string"}]}, {"name": "instruction", "dtype": "string"}, {"name": "prompt_name", "dtype": "string"}, {"name": "metadata", "struct": [{"name": "max_ratio", "dtype": "float64"}, {"name": "paragraph_similar", "dtype": "string"}, {"name": "start_index", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 21511872, "num_examples": 7597}], "download_size": 0, "dataset_size": 21511872}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-14T14:34:31+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "test_ds"
More Information needed | [
"# Dataset Card for \"test_ds\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"test_ds\"\n\nMore Information needed"
]
| [
6,
13
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"test_ds\"\n\nMore Information needed"
]
|
f2876594a3d0a4ab05a9f3ee76a5b9da5eee1abb | # Dataset Card for "mind_10k"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | jiuyuan/mind_10k | [
"region:us"
]
| 2023-11-10T03:32:51+00:00 | {"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 56720773, "num_examples": 9778}], "download_size": 25546197, "dataset_size": 56720773}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-10T03:32:54+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "mind_10k"
More Information needed | [
"# Dataset Card for \"mind_10k\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"mind_10k\"\n\nMore Information needed"
]
| [
6,
14
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"mind_10k\"\n\nMore Information needed"
]
|
eadf5c0a23642bcf85a8c1087116294dcbfeaf25 | # Dataset Card for "test_ds_1"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | Back-up/test_ds_1 | [
"region:us"
]
| 2023-11-10T03:46:28+00:00 | {"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "options", "list": [{"name": "answer", "dtype": "string"}, {"name": "key", "dtype": "string"}]}, {"name": "answer", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 117374, "num_examples": 103}], "download_size": 30084, "dataset_size": 117374}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-10T03:46:31+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "test_ds_1"
More Information needed | [
"# Dataset Card for \"test_ds_1\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"test_ds_1\"\n\nMore Information needed"
]
| [
6,
14
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"test_ds_1\"\n\nMore Information needed"
]
|
925e87590c70d7b4952c4e4ab0a2891391626338 | # Dataset Card for "health-100"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | Back-up/health-100 | [
"region:us"
]
| 2023-11-10T03:47:04+00:00 | {"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "options", "list": [{"name": "answer", "dtype": "string"}, {"name": "key", "dtype": "string"}]}, {"name": "answer", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 117374, "num_examples": 103}], "download_size": 30084, "dataset_size": 117374}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-10T03:47:07+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "health-100"
More Information needed | [
"# Dataset Card for \"health-100\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"health-100\"\n\nMore Information needed"
]
| [
6,
12
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"health-100\"\n\nMore Information needed"
]
|
19e26cd321c1ae06abb6ca8bbefed8faf7cd8716 | # Dataset Card for "complexquestion_BAMBOOGLE"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | presencesw/complexquestion_BAMBOOGLE | [
"region:us"
]
| 2023-11-10T04:00:07+00:00 | {"dataset_info": {"features": [{"name": "entities", "sequence": "null"}, {"name": "triplets", "list": [{"name": "question", "dtype": "string"}]}, {"name": "answer", "dtype": "string"}, {"name": "complex_question", "dtype": "string"}], "splits": [{"name": "v1", "num_bytes": 1734000, "num_examples": 8693}], "download_size": 0, "dataset_size": 1734000}, "configs": [{"config_name": "default", "data_files": [{"split": "v1", "path": "data/v1-*"}]}]} | 2023-11-10T08:10:10+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "complexquestion_BAMBOOGLE"
More Information needed | [
"# Dataset Card for \"complexquestion_BAMBOOGLE\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"complexquestion_BAMBOOGLE\"\n\nMore Information needed"
]
| [
6,
19
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"complexquestion_BAMBOOGLE\"\n\nMore Information needed"
]
|
8200071cc18611e68abad398f3e378dd9bc0ec12 | ## Overview
MeetingBank is a benchmark dataset created from the city councils of 6 major U.S. cities to supplement existing datasets. It contains 1,366 meetings with over 3,579 hours of video, as well as transcripts, PDF documents of meeting minutes, agendas, and other metadata. On average, a council meeting is 2.6 hours long and its transcript contains over 28k tokens, making it a valuable testbed for meeting summarizers and for extracting structure from meeting videos. The dataset contains 6,892 segment-level summarization instances for training and evaluation.
## Data Structure
```json
{
"id": 0,
"uid": "SeattleCityCouncil_06132016_Res 31669",
"summary": "A RESOLUTION encouraging as a best practice ...",
"transcript": "The report of the Civil Rights, Utilities, Economic ..."
}
```
## Usage
```python
from datasets import load_dataset
meetingbank = load_dataset("huuuyeah/meetingbank")
train_data = meetingbank['train']
test_data = meetingbank['test']
val_data = meetingbank['validation']
def generator(data_split):
    for instance in data_split:
        yield instance['id'], instance['summary'], instance['transcript']
```
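A minimal sketch of consuming the `generator` helper defined above (the field names come from the data-structure example earlier in this card; the exact usage shown here is an illustration, not part of an official loading script):

```python
# Inspect a single validation instance to sanity-check the fields (illustrative only).
for ex_id, summary, transcript in generator(val_data):
    print(ex_id)
    print("transcript words:", len(transcript.split()))
    print("summary words:", len(summary.split()))
    break  # drop this to stream the whole split
```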
## Acknowledgement
Please cite the following paper in work that makes use of this dataset:
[MeetingBank: A Benchmark Dataset for Meeting Summarization](https://arxiv.org/abs/2305.17529)\
Yebowen Hu, Tim Ganter, Hanieh Deilamsalehy, Franck Dernoncourt, Hassan Foroosh, Fei Liu\
In main conference of Association for Computational Linguistics (ACL'23), Toronto, Canada.
## Bibtex
```
@inproceedings{hu-etal-2023-meetingbank,
title = "MeetingBank: A Benchmark Dataset for Meeting Summarization",
author = "Yebowen Hu and Tim Ganter and Hanieh Deilamsalehy and Franck Dernoncourt and Hassan Foroosh and Fei Liu",
booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (ACL)",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
}
```
## Multi-media Resources
The MeetingBank dataset will be hosted at Zenodo. The audio files of each meeting will be hosted individually on Huggingface. All resources will include meeting audio, transcripts, the MeetingBank main JSON file, summaries from 6 systems, and human annotations.
**Text & Audio**: [zenodo](https://zenodo.org/record/7989108), Huggingface([splits](https://huggingface.co/datasets/huuuyeah/meetingbank), [audio&transcripts](https://huggingface.co/datasets/huuuyeah/MeetingBank_Audio))
**Videos**: All meeting videos can be found in https://archive.org/
- [Alameda](https://archive.org/details/meetingbank-alameda), [Boston](https://archive.org/details/meetingbank-boston), [Denver](https://archive.org/details/meetingbank-denver), [Long Beach](https://archive.org/details/meetingbank-long-beach) ,[King County](https://archive.org/details/meetingbank-king-county), [Seattle](https://archive.org/details/meetingbank-seattle)
**Python Scripts**
Useful scripts and guidance can be found in github repo [MeetingBank_Utils](https://github.com/YebowenHu/MeetingBank-utils) | huuuyeah/meetingbank | [
"task_categories:summarization",
"task_categories:text-generation",
"size_categories:10M<n<100M",
"language:en",
"license:cc-by-nc-sa-4.0",
"municipal",
"meeting",
"transcripts",
"benchmark",
"long-context",
"arxiv:2305.17529",
"region:us"
]
| 2023-11-10T04:02:31+00:00 | {"language": ["en"], "license": "cc-by-nc-sa-4.0", "size_categories": ["10M<n<100M"], "task_categories": ["summarization", "text-generation"], "tags": ["municipal", "meeting", "transcripts", "benchmark", "long-context"]} | 2023-11-10T04:52:54+00:00 | [
"2305.17529"
]
| [
"en"
]
| TAGS
#task_categories-summarization #task_categories-text-generation #size_categories-10M<n<100M #language-English #license-cc-by-nc-sa-4.0 #municipal #meeting #transcripts #benchmark #long-context #arxiv-2305.17529 #region-us
| ## Overview
MeetingBank is a benchmark dataset created from the city councils of 6 major U.S. cities to supplement existing datasets. It contains 1,366 meetings with over 3,579 hours of video, as well as transcripts, PDF documents of meeting minutes, agendas, and other metadata. On average, a council meeting is 2.6 hours long and its transcript contains over 28k tokens, making it a valuable testbed for meeting summarizers and for extracting structure from meeting videos. The dataset contains 6,892 segment-level summarization instances for training and evaluation.
## Data Structure
## Usage
## Acknowledgement
Please cite the following paper in work that makes use of this dataset:
MeetingBank: A Benchmark Dataset for Meeting Summarization\
Yebowen Hu, Tim Ganter, Hanieh Deilamsalehy, Franck Dernoncourt, Hassan Foroosh, Fei Liu\
In main conference of Association for Computational Linguistics (ACL'23), Toronto, Canada.
## Bibtex
## Multi-media Resources
The MeetingBank dataset will be hosted at Zenodo. The audio files of each meeting will be hosted individually on Huggingface. All resources will include meeting audio, transcripts, the MeetingBank main JSON file, summaries from 6 systems, and human annotations.
Text & Audio: zenodo, Huggingface(splits, audio&transcripts)
Videos: All meeting videos can be found in URL
- Alameda, Boston, Denver, Long Beach ,King County, Seattle
Python Scripts
Useful scripts and guidance can be found in github repo MeetingBank_Utils | [
"## Overview\n\nMeetingBank, a benchmark dataset created from the city councils of 6 major U.S. cities to supplement existing datasets. It contains 1,366 meetings with over 3,579 hours of video, as well as transcripts, PDF documents of meeting minutes, agenda, and other metadata. On average, a council meeting is 2.6 hours long and its transcript contains over 28k tokens, making it a valuable testbed for meeting summarizers and for extracting structure from meeting videos. The datasets contains 6,892 segment-level summarization instances for training and evaluating of performance.",
"## Data Structure",
"## Usage",
"## Acknowledgement\n\nPlease cite the following paper in work that makes use of this dataset:\n\nMeetingBank: A Benchmark Dataset for Meeting Summarization\\\nYebowen Hu, Tim Ganter, Hanieh Deilamsalehy, Franck Dernoncourt, Hassan Foroosh, Fei Liu\\\nIn main conference of Association for Computational Linguistics (ACL'23), Toronto, Canada.",
"## Bibtex",
"## Multi-media Resources\n\nMeetingBank dataset will be hosted at Zenodo. The audio files of each meeting will be hosted individually on Huggingface. All resources will includes meeting audio, transcripts, meetingbank main JSON file, summaries from 6 systems and human annotations.\n\nText & Audio: zenodo, Huggingface(splits, audio&transcripts)\n\nVideos: All meeting videos can be found in URL\n\n- Alameda, Boston, Denver, Long Beach ,King County, Seattle\n\nPython Scripts\nUseful scripts and guidance can be found in github repo MeetingBank_Utils"
]
| [
"TAGS\n#task_categories-summarization #task_categories-text-generation #size_categories-10M<n<100M #language-English #license-cc-by-nc-sa-4.0 #municipal #meeting #transcripts #benchmark #long-context #arxiv-2305.17529 #region-us \n",
"## Overview\n\nMeetingBank, a benchmark dataset created from the city councils of 6 major U.S. cities to supplement existing datasets. It contains 1,366 meetings with over 3,579 hours of video, as well as transcripts, PDF documents of meeting minutes, agenda, and other metadata. On average, a council meeting is 2.6 hours long and its transcript contains over 28k tokens, making it a valuable testbed for meeting summarizers and for extracting structure from meeting videos. The datasets contains 6,892 segment-level summarization instances for training and evaluating of performance.",
"## Data Structure",
"## Usage",
"## Acknowledgement\n\nPlease cite the following paper in work that makes use of this dataset:\n\nMeetingBank: A Benchmark Dataset for Meeting Summarization\\\nYebowen Hu, Tim Ganter, Hanieh Deilamsalehy, Franck Dernoncourt, Hassan Foroosh, Fei Liu\\\nIn main conference of Association for Computational Linguistics (ACL'23), Toronto, Canada.",
"## Bibtex",
"## Multi-media Resources\n\nMeetingBank dataset will be hosted at Zenodo. The audio files of each meeting will be hosted individually on Huggingface. All resources will includes meeting audio, transcripts, meetingbank main JSON file, summaries from 6 systems and human annotations.\n\nText & Audio: zenodo, Huggingface(splits, audio&transcripts)\n\nVideos: All meeting videos can be found in URL\n\n- Alameda, Boston, Denver, Long Beach ,King County, Seattle\n\nPython Scripts\nUseful scripts and guidance can be found in github repo MeetingBank_Utils"
]
| [
85,
133,
5,
3,
92,
4,
134
]
| [
"passage: TAGS\n#task_categories-summarization #task_categories-text-generation #size_categories-10M<n<100M #language-English #license-cc-by-nc-sa-4.0 #municipal #meeting #transcripts #benchmark #long-context #arxiv-2305.17529 #region-us \n## Overview\n\nMeetingBank, a benchmark dataset created from the city councils of 6 major U.S. cities to supplement existing datasets. It contains 1,366 meetings with over 3,579 hours of video, as well as transcripts, PDF documents of meeting minutes, agenda, and other metadata. On average, a council meeting is 2.6 hours long and its transcript contains over 28k tokens, making it a valuable testbed for meeting summarizers and for extracting structure from meeting videos. The datasets contains 6,892 segment-level summarization instances for training and evaluating of performance.## Data Structure## Usage## Acknowledgement\n\nPlease cite the following paper in work that makes use of this dataset:\n\nMeetingBank: A Benchmark Dataset for Meeting Summarization\\\nYebowen Hu, Tim Ganter, Hanieh Deilamsalehy, Franck Dernoncourt, Hassan Foroosh, Fei Liu\\\nIn main conference of Association for Computational Linguistics (ACL'23), Toronto, Canada.## Bibtex## Multi-media Resources\n\nMeetingBank dataset will be hosted at Zenodo. The audio files of each meeting will be hosted individually on Huggingface. All resources will includes meeting audio, transcripts, meetingbank main JSON file, summaries from 6 systems and human annotations.\n\nText & Audio: zenodo, Huggingface(splits, audio&transcripts)\n\nVideos: All meeting videos can be found in URL\n\n- Alameda, Boston, Denver, Long Beach ,King County, Seattle\n\nPython Scripts\nUseful scripts and guidance can be found in github repo MeetingBank_Utils"
]
|
25a5863b5a6f1483f71ea814091c8f7cea174eb4 | # Dataset Card for "test-lcm"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | rdcoder/test-lcm | [
"region:us"
]
| 2023-11-10T04:04:07+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 15587429.0, "num_examples": 30}], "download_size": 15580385, "dataset_size": 15587429.0}} | 2023-11-10T04:47:24+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "test-lcm"
More Information needed | [
"# Dataset Card for \"test-lcm\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"test-lcm\"\n\nMore Information needed"
]
| [
6,
14
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"test-lcm\"\n\nMore Information needed"
]
|
235432d0c4f91a8ca1491222a13b03f5af9ab3cf | Dataset using the bert-cased tokenizer, cutoff sentences to 512 length (not sentence pairs), all sentence pairs extracted.
Original datasets:
https://huggingface.co/datasets/bookcorpus
Original dataset: https://huggingface.co/datasets/wikipedia Variant: 20220301.en | gmongaras/BERT_Base_Cased_512_Dataset2 | [
"region:us"
]
| 2023-11-10T04:06:10+00:00 | {"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 26485537877, "num_examples": 109418257}], "download_size": 10245098382, "dataset_size": 26485537877}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-11T00:42:40+00:00 | []
| []
| TAGS
#region-us
Dataset built using the bert-cased tokenizer, with sentences cut off at 512 tokens (not sentence pairs); all sentence pairs were extracted.
Original datasets:
URL
Original dataset: URL Variant: URL | []
| [
"TAGS\n#region-us \n"
]
| [
6
]
| [
"passage: TAGS\n#region-us \n"
]
|
82f3881cd97937a8fc67fb63ca4a7a623b873970 | # Dataset Card for "zaloai-2023-nlp-train-vi"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | vinhtran2611/zaloai-2023-nlp-train-vi | [
"region:us"
]
| 2023-11-10T04:10:05+00:00 | {"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}, {"split": "dev", "path": "data/dev-*"}]}], "dataset_info": {"features": [{"name": "choices", "sequence": "string"}, {"name": "explanation", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "labels", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 248715, "num_examples": 960}, {"name": "test", "num_bytes": 30160, "num_examples": 120}, {"name": "dev", "num_bytes": 30996, "num_examples": 120}], "download_size": 175194, "dataset_size": 309871}} | 2023-11-11T01:39:57+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "zaloai-2023-nlp-train-vi"
More Information needed | [
"# Dataset Card for \"zaloai-2023-nlp-train-vi\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"zaloai-2023-nlp-train-vi\"\n\nMore Information needed"
]
| [
6,
23
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"zaloai-2023-nlp-train-vi\"\n\nMore Information needed"
]
|
130c41bee61898e6995a6966f62dba35451836d7 | # Dataset Card for "complexquestion_FERMI"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | presencesw/complexquestion_FERMI | [
"region:us"
]
| 2023-11-10T04:13:32+00:00 | {"dataset_info": {"features": [{"name": "entities", "sequence": "null"}, {"name": "triplets", "list": [{"name": "question", "dtype": "string"}]}, {"name": "answer", "dtype": "string"}, {"name": "complex_question", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 50299, "num_examples": 185}], "download_size": 27958, "dataset_size": 50299}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-10T04:19:51+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "complexquestion_FERMI"
More Information needed | [
"# Dataset Card for \"complexquestion_FERMI\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"complexquestion_FERMI\"\n\nMore Information needed"
]
| [
6,
17
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"complexquestion_FERMI\"\n\nMore Information needed"
]
|
e83ca72f1166749f0fbfd1a21a6a3ddcd14df4cd | # Dataset Card for "food-100"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | Back-up/food-100 | [
"region:us"
]
| 2023-11-10T04:14:12+00:00 | {"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "options", "list": [{"name": "answer", "dtype": "string"}, {"name": "key", "dtype": "string"}]}, {"name": "answer", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 102965, "num_examples": 101}], "download_size": 26468, "dataset_size": 102965}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]} | 2023-11-10T04:14:16+00:00 | []
| []
| TAGS
#region-us
| # Dataset Card for "food-100"
More Information needed | [
"# Dataset Card for \"food-100\"\n\nMore Information needed"
]
| [
"TAGS\n#region-us \n",
"# Dataset Card for \"food-100\"\n\nMore Information needed"
]
| [
6,
12
]
| [
"passage: TAGS\n#region-us \n# Dataset Card for \"food-100\"\n\nMore Information needed"
]
|