sha | text | id | tags | created_at | metadata | last_modified |
---|---|---|---|---|---|---|
5dd8a2169fcd7200673571d90927f19a1a8fdf02
|
# Dataset Card for "mmlu-anatomy-rule-neg"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
joey234/mmlu-anatomy-rule-neg
|
[
"region:us"
] |
2023-04-27T03:59:05+00:00
|
{"dataset_info": {"features": [{"name": "choices", "sequence": "string"}, {"name": "answer", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D"}}}}, {"name": "question", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 33692, "num_examples": 135}], "download_size": 19850, "dataset_size": 33692}}
|
2023-04-28T12:46:37+00:00
|
26c04f396139e56d4dff92fb73f0d243aba4b77a
|
# Dataset Card for "mmlu-astronomy-rule-neg"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
joey234/mmlu-astronomy-rule-neg
|
[
"region:us"
] |
2023-04-27T03:59:40+00:00
|
{"dataset_info": {"features": [{"name": "choices", "sequence": "string"}, {"name": "answer", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D"}}}}, {"name": "question", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 47234, "num_examples": 152}], "download_size": 28038, "dataset_size": 47234}}
|
2023-04-28T12:46:52+00:00
|
e2cf4e0e671f8f916086c954c5189e5889eab540
|
# Dataset Card for "mmlu-business_ethics-rule-neg"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
joey234/mmlu-business_ethics-rule-neg
|
[
"region:us"
] |
2023-04-27T03:59:52+00:00
|
{"dataset_info": {"features": [{"name": "choices", "sequence": "string"}, {"name": "answer", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D"}}}}, {"name": "question", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 33573, "num_examples": 100}], "download_size": 20450, "dataset_size": 33573}}
|
2023-04-28T12:47:06+00:00
|
0a5152b284709cdcce827ea7000d4279a8f1cbfd
|
# Dataset Card for "mmlu-clinical_knowledge-rule-neg"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
joey234/mmlu-clinical_knowledge-rule-neg
|
[
"region:us"
] |
2023-04-27T04:00:14+00:00
|
{"dataset_info": {"features": [{"name": "choices", "sequence": "string"}, {"name": "answer", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D"}}}}, {"name": "question", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 63511, "num_examples": 265}], "download_size": 40149, "dataset_size": 63511}}
|
2023-04-28T12:47:28+00:00
|
53126f291ce1b7fc0f14d55b23c5df4314cb9eb5
|
# Dataset Card for "mmlu-college_biology-rule-neg"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
joey234/mmlu-college_biology-rule-neg
|
[
"region:us"
] |
2023-04-27T04:00:42+00:00
|
{"dataset_info": {"features": [{"name": "choices", "sequence": "string"}, {"name": "answer", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D"}}}}, {"name": "question", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 49310, "num_examples": 144}], "download_size": 31547, "dataset_size": 49310}}
|
2023-04-28T12:47:43+00:00
|
1247baccc5ad5575ff3526c2c3bad1231457441d
|
# Dataset Card for "mmlu-college_chemistry-rule-neg"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
joey234/mmlu-college_chemistry-rule-neg
|
[
"region:us"
] |
2023-04-27T04:01:00+00:00
|
{"dataset_info": {"features": [{"name": "choices", "sequence": "string"}, {"name": "answer", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D"}}}}, {"name": "question", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 25114, "num_examples": 100}], "download_size": 17279, "dataset_size": 25114}}
|
2023-04-28T12:47:56+00:00
|
1632852e00da5460c631654b9b6f1b731a9a29ce
|
# Dataset Card for "mmlu-college_computer_science-rule-neg"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
joey234/mmlu-college_computer_science-rule-neg
|
[
"region:us"
] |
2023-04-27T04:01:22+00:00
|
{"dataset_info": {"features": [{"name": "choices", "sequence": "string"}, {"name": "answer", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D"}}}}, {"name": "question", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 41303, "num_examples": 100}], "download_size": 26956, "dataset_size": 41303}}
|
2023-04-28T12:48:23+00:00
|
5474449454de06c7680428df06a15f7efd9dac6f
|
# Dataset Card for "mmlu-college_mathematics-rule-neg"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
joey234/mmlu-college_mathematics-rule-neg
|
[
"region:us"
] |
2023-04-27T04:01:34+00:00
|
{"dataset_info": {"features": [{"name": "choices", "sequence": "string"}, {"name": "answer", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D"}}}}, {"name": "question", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 24163, "num_examples": 100}], "download_size": 15864, "dataset_size": 24163}}
|
2023-04-28T12:49:05+00:00
|
e324cacf97d73e6e2c12f865253c7291c6a233bc
|
# Dataset Card for "mmlu-college_medicine-rule-neg"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
joey234/mmlu-college_medicine-rule-neg
|
[
"region:us"
] |
2023-04-27T04:01:56+00:00
|
{"dataset_info": {"features": [{"name": "choices", "sequence": "string"}, {"name": "answer", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D"}}}}, {"name": "question", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 83082, "num_examples": 173}], "download_size": 42212, "dataset_size": 83082}}
|
2023-04-28T12:49:26+00:00
|
805b88cb44a13a1a9e968ca69d38380c8fda8a24
|
# Dataset Card for "mmlu-college_physics-rule-neg"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
joey234/mmlu-college_physics-rule-neg
|
[
"region:us"
] |
2023-04-27T04:02:09+00:00
|
{"dataset_info": {"features": [{"name": "choices", "sequence": "string"}, {"name": "answer", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D"}}}}, {"name": "question", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 30630, "num_examples": 102}], "download_size": 18326, "dataset_size": 30630}}
|
2023-04-28T12:49:38+00:00
|
3013b7f400259a97bb49e3f81409f64755bf93ee
|
# Dataset Card for "mmlu-computer_security-rule-neg"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
joey234/mmlu-computer_security-rule-neg
|
[
"region:us"
] |
2023-04-27T04:02:20+00:00
|
{"dataset_info": {"features": [{"name": "choices", "sequence": "string"}, {"name": "answer", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D"}}}}, {"name": "question", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 27322, "num_examples": 100}], "download_size": 18603, "dataset_size": 27322}}
|
2023-04-28T12:49:57+00:00
|
48486e376253a31e02f396be0f8d180ffef471e1
|
# Dataset Card for "mmlu-conceptual_physics-rule-neg"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
joey234/mmlu-conceptual_physics-rule-neg
|
[
"region:us"
] |
2023-04-27T04:02:37+00:00
|
{"dataset_info": {"features": [{"name": "choices", "sequence": "string"}, {"name": "answer", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D"}}}}, {"name": "question", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 41680, "num_examples": 235}], "download_size": 24838, "dataset_size": 41680}}
|
2023-04-28T12:50:14+00:00
|
c0121e68b87ae9fbcb0a88f98671076003edee3c
|
# Dataset Card for "mmlu-econometrics-rule-neg"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
joey234/mmlu-econometrics-rule-neg
|
[
"region:us"
] |
2023-04-27T04:02:50+00:00
|
{"dataset_info": {"features": [{"name": "choices", "sequence": "string"}, {"name": "answer", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D"}}}}, {"name": "question", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 46690, "num_examples": 114}], "download_size": 24025, "dataset_size": 46690}}
|
2023-04-28T12:50:27+00:00
|
a3fd98db9b3ce97cdb2a69975d231d4906b2409b
|
demoww/database
|
[
"license:openrail",
"region:us"
] |
2023-04-27T04:50:59+00:00
|
{"license": "openrail"}
|
2023-04-27T04:50:59+00:00
|
|
9b3552af5b8922c63c661c3242cb00d6d2125835
|
# Dataset Card for "my_controlnet"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
cr7Por/my_controlnet
|
[
"region:us"
] |
2023-04-27T04:53:18+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "image_crop", "dtype": "image"}, {"name": "image_caption", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 135354742.0, "num_examples": 435}], "download_size": 135278720, "dataset_size": 135354742.0}}
|
2023-04-27T04:55:39+00:00
|
e41617801a48bb315d3df6febce6e077737e46b0
|
desertfox/hsereg2022
|
[
"license:afl-3.0",
"region:us"
] |
2023-04-27T05:51:33+00:00
|
{"license": "afl-3.0"}
|
2023-04-27T05:51:33+00:00
|
|
fc4776afc332c876fe61547dd9b8720db53b26a8
|
Tireddog/BookRatingRecSys
|
[
"license:ecl-2.0",
"region:us"
] |
2023-04-27T06:09:19+00:00
|
{"license": "ecl-2.0"}
|
2023-04-27T06:40:34+00:00
|
|
7e9ddcd6de1a9defc52e0a39ae38c9ea28b77817
|
This dataset is the WizardLM dataset victor123/evol_instruct_70k with instances of blatant alignment removed.
54,974 instructions remain.
Inspired by https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered
All credit to anon8231489123 for the cleanup script, which I adapted into wizardlm_clean.py
---
license: apache-2.0
language:
- en
pretty_name: wizardlm-unfiltered
---
|
cognitivecomputations/WizardLM_alpaca_evol_instruct_70k_unfiltered
|
[
"region:us"
] |
2023-04-27T06:12:18+00:00
|
{}
|
2023-04-28T06:36:17+00:00
|
33bc60f0ec1f8410d8f7300e41fea89b65ac2d84
|
# Dataset Card for "x-fact"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
metaeval/x-fact
|
[
"region:us"
] |
2023-04-27T06:24:30+00:00
|
{"dataset_info": {"features": [{"name": "evidence", "dtype": "string"}, {"name": "claim", "dtype": "string"}, {"name": "label", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 44250130, "num_examples": 30162}, {"name": "dev", "num_bytes": 3501110, "num_examples": 2393}, {"name": "test", "num_bytes": 5214557, "num_examples": 3597}], "download_size": 29968985, "dataset_size": 52965797}}
|
2023-04-27T06:26:56+00:00
|
ba32bfae6ca11748275b15943cff7a105251fb0d
|
# Dataset Card for "AI-Superstar-Dataset"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
FourthBrainGenAI/AI-Superstar-Dataset
|
[
"region:us"
] |
2023-04-27T06:30:47+00:00
|
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 44747, "num_examples": 148}], "download_size": 23888, "dataset_size": 44747}}
|
2023-04-27T06:30:50+00:00
|
35d2188cb09bf8407f6c7b4d4e2cbda425d2a1a2
|
# Tagesschau Archive Article Dataset
A scrape of Tagesschau.de articles from 01.01.2018 to 26.04.2023. Find all source code in [github.com/bjoernpl/tagesschau](https://github.com/bjoernpl/tagesschau).
## Dataset Information
CSV structure:
| Field | Description |
| --- | --- |
| `date` | Date of the article |
| `headline` | Title of the article |
| `short_headline` | A short headline / Context |
| `short_text` | A brief summary of the article |
| `article` | The full text of the article |
| `href` | The href of the article on tagesschau.de |
Size:
The final dataset (2018-today) contains 225,202 articles from 1,942 days. Of these articles, only
21,848 are unique (Tagesschau often keeps articles in circulation for ~1 month). The total download
size is ~65 MB.
Cleaning:
- Duplicate articles are removed
- Articles with empty text are removed
- Articles with empty short_texts are removed
- Articles, headlines and short_headlines are stripped of leading and trailing whitespace
More details in [`clean.py`](https://github.com/bjoernpl/tagesschau/blob/main/clean.py).
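A minimal loading sketch (the repo id `bjoernp/tagesschau-2018-2023` and the `train` split are taken from this listing's metadata; this is not an official snippet):
```python
from datasets import load_dataset

# Assumed repo id and split name, taken from this listing's metadata.
ds = load_dataset("bjoernp/tagesschau-2018-2023", split="train")

article = ds[0]
print(article["date"], article["headline"])
print(article["short_text"])
```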
|
bjoernp/tagesschau-2018-2023
|
[
"size_categories:10K<n<100K",
"language:de",
"region:us"
] |
2023-04-27T06:49:50+00:00
|
{"language": ["de"], "size_categories": ["10K<n<100K"], "dataset_info": {"features": [{"name": "date", "dtype": "string"}, {"name": "headline", "dtype": "string"}, {"name": "short_headline", "dtype": "string"}, {"name": "short_text", "dtype": "string"}, {"name": "article", "dtype": "string"}, {"name": "link", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 107545823, "num_examples": 21847}], "download_size": 63956047, "dataset_size": 107545823}}
|
2023-04-27T08:04:08+00:00
|
21ea494d34547f43a529612e9cebbfe8454c22b1
|
# Dataset Card for "cv11_ar_mix"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
MohammedNasri/cv11_ar_mix
|
[
"region:us"
] |
2023-04-27T07:14:50+00:00
|
{"dataset_info": {"features": [{"name": "input_features", "sequence": {"sequence": "float32"}}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 36960805056, "num_examples": 38481}, {"name": "test", "num_bytes": 10027431536, "num_examples": 10440}], "download_size": 4276265822, "dataset_size": 46988236592}}
|
2023-04-27T07:26:14+00:00
|
c008b4afff727ce095b095700485fd5c9eb67aec
|
# Rayla - Moon Elf from "The Dragon Prince" Netflix series
## Dataset Description
54 hand-picked images at 1024x1024 resolution.
Captions were generated by wd14-tagger and pruned of any inaccuracies.
The 3-token trigger word "raylaDP" is added as the first caption in each file.
|
attackparent/Rayla-TheDragonPrince
|
[
"region:us"
] |
2023-04-27T07:23:36+00:00
|
{}
|
2023-04-27T07:30:29+00:00
|
e97ec8be10fcaadc7843de07611315fb7a1b4191
|
Pictures of the view from an office window in Beijing, taken from the same spot and angle at noon on different dates.
Address of the office: TECHART PLAZA, NO.30 XUEYUAN ROAD, floor 8.
Potential application:
learning air quality and weather from the pictures
|
rub1sc0/beijing-views
|
[
"license:gpl-3.0",
"Beijing",
"Weather",
"Air quality",
"photos",
"region:us"
] |
2023-04-27T08:29:07+00:00
|
{"license": "gpl-3.0", "tags": ["Beijing", "Weather", "Air quality", "photos"]}
|
2023-04-27T08:36:51+00:00
|
abe866f10e3c3fc2a7fc43dadaacb3f6af63e3d3
|
# Dataset Card for "product_sentiment_machine_hack"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
james-burton/product_sentiment_machine_hack
|
[
"region:us"
] |
2023-04-27T08:36:14+00:00
|
{"dataset_info": {"features": [{"name": "Product_Description", "dtype": "string"}, {"name": "Product_Type", "dtype": "int64"}, {"name": "Sentiment", "dtype": {"class_label": {"names": {"0": "0", "1": "1", "2": "2", "3": "3"}}}}], "splits": [{"name": "train", "num_bytes": 539691.8956982911, "num_examples": 4327}, {"name": "validation", "num_bytes": 95291.1043017089, "num_examples": 764}, {"name": "test", "num_bytes": 159788, "num_examples": 1273}], "download_size": 442311, "dataset_size": 794771.0}}
|
2023-04-27T08:36:23+00:00
|
1c4a58181f460f0e2020e33f36303b0cf9010a03
|
# Dataset Card for "CuPL_DaVinci_captioned_CUB2002011_train"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
anjunhu/CuPL_DaVinci_captioned_CUB2002011_train
|
[
"region:us"
] |
2023-04-27T09:08:01+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 166122794.75, "num_examples": 5994}], "download_size": 165787380, "dataset_size": 166122794.75}}
|
2023-04-27T09:08:20+00:00
|
a3db8473b759c619c0d1f1657ffa028a89c87ea7
|
# Dataset Card for "MuGeminorum/AAL-statistics-volumn"
The AAL (Automated Anatomical Labeling) Statistics Volumetric dataset provides a comprehensive collection of brain volumetric measurements based on the AAL atlas. It encompasses statistical information on brain regions derived from structural magnetic resonance imaging (MRI) scans. Researchers commonly utilize this dataset for investigations related to neuroimaging, neuroscience, and brain structure analysis. The AAL Statistics Volumetric dataset plays a pivotal role in advancing our understanding of brain anatomy, enabling the development and evaluation of algorithms for automated brain region identification and volumetric analysis. With its wealth of volumetric data derived from diverse individuals, this dataset serves as a valuable resource for studies aimed at characterizing variations in brain structures across populations and contributing to advancements in neuroscientific research.
## Usage
```python
from datasets import load_dataset
data = load_dataset("MuGeminorum/AAL-statistics-volumn", split='train')
for item in data:
    print(item)
```
## Maintenance
```bash
git clone [email protected]:datasets/MuGeminorum/AAL-statistics-volumn
```
## Mirror
<https://www.modelscope.cn/datasets/MuGeminorum/AAL_statistics_volumn>
## Reference
[1] [Chapter II ‐ Classifying AD patients and normal controls from brain images](https://github.com/MuGeminorum/Medical_Image_Computing/wiki/Chapter-II-%E2%80%90-Classifying-AD-patients-and-normal-controls-from-brain-images)
|
MuGeminorum/AAL_statistics_volumn
|
[
"task_categories:image-classification",
"task_categories:feature-extraction",
"size_categories:n<1K",
"language:en",
"license:mit",
"biology",
"medical",
"region:us"
] |
2023-04-27T09:21:31+00:00
|
{"language": ["en"], "license": "mit", "size_categories": ["n<1K"], "task_categories": ["image-classification", "feature-extraction"], "pretty_name": "AAL Statistics Volumn", "tags": ["biology", "medical"]}
|
2024-01-13T01:52:38+00:00
|
43e1b4fb2314b5e5430b85edb26d087ed8e18aa0
|
# Dataset Card for "HF-Syringe-Dataset"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Murali0604/HF-Syringe-Dataset
|
[
"region:us"
] |
2023-04-27T09:54:04+00:00
|
{"dataset_info": {"features": [{"name": "pixel_values", "dtype": "image"}, {"name": "label", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 137911723.0, "num_examples": 12}], "download_size": 10051906, "dataset_size": 137911723.0}}
|
2023-05-02T07:39:41+00:00
|
324fbf705f4f75edb7b431a7e0a444471b4bba63
|
```bib
@inproceedings{lamprinidis2021universal,
  title={Universal Joy: A Dataset and Results for Classifying Emotions Across Languages},
  author={Lamprinidis, Sotiris and Bianchi, Federico and Hardt, Daniel and Hovy, Dirk},
  year={2021},
  booktitle={11th Workshop on Computational Approaches to Subjectivity, Sentiment & Social Media Analysis (WASSA 2021)},
  organization={Association for Computational Linguistics}
}
```
|
metaeval/universal-joy
|
[
"task_categories:text-classification",
"license:gpl",
"multilingual",
"emotion",
"region:us"
] |
2023-04-27T09:55:41+00:00
|
{"license": "gpl", "task_categories": ["text-classification"], "tags": ["multilingual", "emotion"]}
|
2023-04-27T09:58:46+00:00
|
09479c0444ab2b5038fe1343830713866db59ce8
|
# Dataset Card for "pubmedlongtokenised"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
reginaboateng/pubmedlongtokenised
|
[
"region:us"
] |
2023-04-27T10:07:37+00:00
|
{"dataset_info": {"features": [{"name": "article", "dtype": "string"}, {"name": "abstract", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "target_ids", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 2627110875, "num_examples": 119924}, {"name": "validation", "num_bytes": 148141199, "num_examples": 6633}], "download_size": 1251052672, "dataset_size": 2775252074}}
|
2023-04-27T10:08:58+00:00
|
acef19ba3353523b66f9e72bcbfa05441fcf5472
|
nilaytufek/OPC_UA_NER_BC
|
[
"task_categories:text-classification",
"language:en",
"license:apache-2.0",
"doi:10.57967/hf/1424",
"region:us"
] |
2023-04-27T10:11:16+00:00
|
{"language": ["en"], "license": "apache-2.0", "task_categories": ["text-classification"]}
|
2023-12-05T09:49:52+00:00
|
|
e60319b5de9a55e19c5e26a4e8338ae582d819a2
|
ashokpoudel/personal
|
[
"license:unknown",
"region:us"
] |
2023-04-27T10:13:00+00:00
|
{"license": "unknown"}
|
2023-04-28T03:07:30+00:00
|
|
c59f856f9cb2b5b0794d5c836289f6f122a9196d
|
# ChatGPT3.5 Noisy Translation Manglish
Notebooks at https://github.com/mesolitica/malaysian-dataset/tree/master/translation/chatgpt3.5-manglish
|
mesolitica/chatgpt-noisy-translation-manglish
|
[
"task_categories:translation",
"language:ms",
"region:us"
] |
2023-04-27T10:22:16+00:00
|
{"language": ["ms"], "task_categories": ["translation"]}
|
2023-12-17T04:08:00+00:00
|
5805e0d393e5abf314251471fd9585462ea75024
|
# Dataset Card for "donut-funsd"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
arvisioncode/donut-funsd
|
[
"region:us"
] |
2023-04-27T10:22:33+00:00
|
{"dataset_info": {"features": [{"name": "ground_truth", "dtype": "string"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 25994868.0, "num_examples": 147}, {"name": "test", "num_bytes": 9129119.0, "num_examples": 47}, {"name": "validation", "num_bytes": 9129119.0, "num_examples": 47}], "download_size": 44182619, "dataset_size": 44253106.0}}
|
2023-04-28T08:16:03+00:00
|
575f2cb3db1dd711cfcb669556d891e6bac73882
|
# Dataset Card for "oasst1_dense_flat"
[OASST1 dataset](https://huggingface.co/datasets/OpenAssistant/oasst1)
But with the parent message text retrieved (`parent_text`), keeping only messages with dense annotations (all labels have 2 annotators).
```python
from datasets import Dataset, DatasetDict, load_dataset

d = {}
for split in ['train', 'validation']:
    df = load_dataset("OpenAssistant/oasst1")[split].to_pandas()
    # map message_id -> text so each message can retrieve its parent's text
    m2t = df.set_index("message_id")['text'].to_dict()
    df['parent_text'] = df.parent_id.map(lambda x: m2t.get(x, ''))
    # keep only messages with dense annotations (enough annotators per label)
    df = df[df.labels.map(lambda x: x != None)]
    df = df[df.labels.map(lambda x: x['count'].min() > 2)]
    # keep only messages annotated with the most common label set
    labels = df.labels.map(lambda x: list(x['name'])).value_counts().index[0]
    df = df[df.labels.map(lambda x: x != None)]
    df = df[df.labels.map(lambda x: list(x['name']) == labels)]
    # flatten each label into its own column
    for label in labels:
        df[label] = df.labels.map(lambda x: x['value'][list(x['name']).index(label)])
    d[split] = Dataset.from_pandas(df, preserve_index=False)
DatasetDict(d).push_to_hub('oasst1_dense_flat')
```
https://github.com/LAION-AI/Open-Assistant
```
@article{kopf2023openassistant,
title={OpenAssistant Conversations--Democratizing Large Language Model Alignment},
author={K{\"o}pf, Andreas and Kilcher, Yannic and von R{\"u}tte, Dimitri and Anagnostidis, Sotiris and Tam, Zhi-Rui and Stevens, Keith and Barhoum, Abdullah and Duc, Nguyen Minh and Stanley, Oliver and Nagyfi, Rich{\'a}rd and others},
journal={arXiv preprint arXiv:2304.07327},
year={2023}
}
```
|
tasksource/oasst1_dense_flat
|
[
"license:apache-2.0",
"region:us"
] |
2023-04-27T10:48:06+00:00
|
{"license": "apache-2.0", "dataset_info": {"features": [{"name": "message_id", "dtype": "string"}, {"name": "parent_id", "dtype": "string"}, {"name": "user_id", "dtype": "string"}, {"name": "created_date", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "role", "dtype": "string"}, {"name": "lang", "dtype": "string"}, {"name": "review_count", "dtype": "int32"}, {"name": "review_result", "dtype": "bool"}, {"name": "deleted", "dtype": "bool"}, {"name": "rank", "dtype": "float64"}, {"name": "synthetic", "dtype": "bool"}, {"name": "model_name", "dtype": "null"}, {"name": "detoxify", "struct": [{"name": "identity_attack", "dtype": "float64"}, {"name": "insult", "dtype": "float64"}, {"name": "obscene", "dtype": "float64"}, {"name": "severe_toxicity", "dtype": "float64"}, {"name": "sexual_explicit", "dtype": "float64"}, {"name": "threat", "dtype": "float64"}, {"name": "toxicity", "dtype": "float64"}]}, {"name": "message_tree_id", "dtype": "string"}, {"name": "tree_state", "dtype": "string"}, {"name": "emojis", "struct": [{"name": "count", "sequence": "int32"}, {"name": "name", "sequence": "string"}]}, {"name": "labels", "struct": [{"name": "count", "sequence": "int32"}, {"name": "name", "sequence": "string"}, {"name": "value", "sequence": "float64"}]}, {"name": "parent_text", "dtype": "string"}, {"name": "spam", "dtype": "float64"}, {"name": "fails_task", "dtype": "float64"}, {"name": "lang_mismatch", "dtype": "float64"}, {"name": "pii", "dtype": "float64"}, {"name": "not_appropriate", "dtype": "float64"}, {"name": "hate_speech", "dtype": "float64"}, {"name": "sexual_content", "dtype": "float64"}, {"name": "quality", "dtype": "float64"}, {"name": "toxicity", "dtype": "float64"}, {"name": "humor", "dtype": "float64"}, {"name": "helpfulness", "dtype": "float64"}, {"name": "creativity", "dtype": "float64"}, {"name": "violence", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 59657796, "num_examples": 34059}, {"name": "validation", "num_bytes": 3164029, "num_examples": 1816}], "download_size": 25173939, "dataset_size": 62821825}}
|
2023-05-31T07:49:36+00:00
|
363085846ce8a21e5765555ce4d54e40bee7379d
|
Iacan/teste
|
[
"license:openrail",
"region:us"
] |
2023-04-27T10:55:15+00:00
|
{"license": "openrail"}
|
2023-04-27T10:55:15+00:00
|
|
932910661f1fb318a27580be9a652479ba7a094d
|
# Dataset Card for "fake_job_postings2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
james-burton/fake_job_postings2
|
[
"region:us"
] |
2023-04-27T11:21:30+00:00
|
{"dataset_info": {"features": [{"name": "title", "dtype": "string"}, {"name": "salary_range", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "required_experience", "dtype": "string"}, {"name": "required_education", "dtype": "string"}, {"name": "fraudulent", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}], "splits": [{"name": "train", "num_bytes": 14551021.06907662, "num_examples": 10816}, {"name": "validation", "num_bytes": 2568222.930923379, "num_examples": 1909}, {"name": "test", "num_bytes": 4359176, "num_examples": 3182}], "download_size": 12161172, "dataset_size": 21478420.0}}
|
2023-04-27T11:21:40+00:00
|
9641d98d75ad6f9e6e8c5d2536e92c000bb46d77
|
# Dataset Card for Dataset Name
## Dataset Description
- **Homepage:**
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**
### Dataset Summary
This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1).
This dataset is a clean version (all NaN removed) of this dataset : https://www.kaggle.com/datasets/devicharith/language-translation-englishfrench . I'm not the person who posted it first on Kaggle.
### Supported Tasks and Leaderboards
[More Information Needed]
### Languages
[More Information Needed]
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
[More Information Needed]
### Contributions
[More Information Needed]
|
PaulineSanchez/Translation_words_and_sentences_english_french
|
[
"task_categories:translation",
"language:en",
"language:fr",
"words",
"sentences",
"everyday life",
"casual",
"region:us"
] |
2023-04-27T11:23:20+00:00
|
{"language": ["en", "fr"], "task_categories": ["translation"], "tags": ["words", "sentences", "everyday life", "casual"]}
|
2023-04-27T11:29:31+00:00
|
45ef34dbb242602875cfe30a24b294354e9679ad
|
# Dataset Card for "FLAIR_OSM_CLIP"
Dataset for the Seg2Sat model: https://github.com/RubenGres/Seg2Sat
Derived from [FLAIR#1](https://huggingface.co/datasets/IGNF/FLAIR) train split.
This dataset includes the following features:
- **image**: FLAIR#1 .tif files, RGB bands converted into a more manageable JPG format
- **segmentation**: FLAIR#1 segmentation converted to JPG using the [LUT from the documentation](https://ignf.github.io/FLAIR/index_fr.html)
- **metadata**: OSM metadata for the centroid of the image
- **clip_label**: [CLIP ViT-H](https://github.com/openai/CLIP) description
- **class_rep**: ratio of appearance of each class in the segmentation
- **prompt**: Prompt used for the Seg2Sat model
<!---
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
--->
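A minimal loading sketch (the repo id `IGNF/FLAIR_1_osm_clip` is taken from this listing; streaming is used here only to avoid the full ~27 GB download, and this is not an official snippet):
```python
from datasets import load_dataset

# Assumed repo id, taken from this listing; feature names follow the card above.
ds = load_dataset("IGNF/FLAIR_1_osm_clip", split="train", streaming=True)

sample = next(iter(ds))
print(sample["prompt"])       # prompt used for the Seg2Sat model
print(sample["clip_label"])   # CLIP ViT-H description
sample["image"].save("patch.jpg")  # aerial RGB patch decoded as a PIL image
```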
|
IGNF/FLAIR_1_osm_clip
|
[
"size_categories:10K<n<100K",
"IGN",
"region:us"
] |
2023-04-27T11:28:28+00:00
|
{"size_categories": ["10K<n<100K"], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "segmentation", "dtype": "image"}, {"name": "metadata", "struct": [{"name": "address", "struct": [{"name": "ISO3166-2-lvl4", "dtype": "string"}, {"name": "ISO3166-2-lvl6", "dtype": "string"}, {"name": "ISO3166-2-lvl7", "dtype": "string"}, {"name": "aerialway", "dtype": "string"}, {"name": "aeroway", "dtype": "string"}, {"name": "allotments", "dtype": "string"}, {"name": "amenity", "dtype": "string"}, {"name": "bridge", "dtype": "string"}, {"name": "building", "dtype": "string"}, {"name": "city", "dtype": "string"}, {"name": "city_district", "dtype": "string"}, {"name": "club", "dtype": "string"}, {"name": "commercial", "dtype": "string"}, {"name": "country", "dtype": "string"}, {"name": "country_code", "dtype": "string"}, {"name": "county", "dtype": "string"}, {"name": "craft", "dtype": "string"}, {"name": "emergency", "dtype": "string"}, {"name": "farm", "dtype": "string"}, {"name": "farmyard", "dtype": "string"}, {"name": "hamlet", "dtype": "string"}, {"name": "healthcare", "dtype": "string"}, {"name": "highway", "dtype": "string"}, {"name": "historic", "dtype": "string"}, {"name": "house_number", "dtype": "string"}, {"name": "industrial", "dtype": "string"}, {"name": "isolated_dwelling", "dtype": "string"}, {"name": "junction", "dtype": "string"}, {"name": "landuse", "dtype": "string"}, {"name": "leisure", "dtype": "string"}, {"name": "locality", "dtype": "string"}, {"name": "man_made", "dtype": "string"}, {"name": "military", "dtype": "string"}, {"name": "municipality", "dtype": "string"}, {"name": "natural", "dtype": "string"}, {"name": "neighbourhood", "dtype": "string"}, {"name": "office", "dtype": "string"}, {"name": "place", "dtype": "string"}, {"name": "postcode", "dtype": "string"}, {"name": "province", "dtype": "string"}, {"name": "quarter", "dtype": "string"}, {"name": "railway", "dtype": "string"}, {"name": "region", "dtype": "string"}, {"name": "residential", "dtype": "string"}, {"name": "retail", "dtype": "string"}, {"name": "road", "dtype": "string"}, {"name": "shop", "dtype": "string"}, {"name": "square", "dtype": "string"}, {"name": "state", "dtype": "string"}, {"name": "state_district", "dtype": "string"}, {"name": "suburb", "dtype": "string"}, {"name": "tourism", "dtype": "string"}, {"name": "town", "dtype": "string"}, {"name": "village", "dtype": "string"}]}, {"name": "boundingbox", "sequence": "string"}, {"name": "camera", "dtype": "string"}, {"name": "code", "dtype": "string"}, {"name": "date", "dtype": "string"}, {"name": "dimensions", "sequence": "int64"}, {"name": "display_name", "dtype": "string"}, {"name": "domain", "dtype": "string"}, {"name": "lat", "dtype": "string"}, {"name": "latlong", "sequence": "float64"}, {"name": "licence", "dtype": "string"}, {"name": "lon", "dtype": "string"}, {"name": "origin", "sequence": "float64"}, {"name": "osm_id", "dtype": "int64"}, {"name": "osm_type", "dtype": "string"}, {"name": "patch_centroid_x", "dtype": "float64"}, {"name": "patch_centroid_y", "dtype": "float64"}, {"name": "patch_centroid_z", "dtype": "float64"}, {"name": "place_id", "dtype": "int64"}, {"name": "time", "dtype": "string"}, {"name": "unit_system", "dtype": "string"}, {"name": "zone", "dtype": "string"}]}, {"name": "class_rep", "struct": [{"name": "class_repartition", "struct": [{"name": "agricultural land", "dtype": "float64"}, {"name": "bare soil", "dtype": "float64"}, {"name": "brushwood", "dtype": "float64"}, {"name": "building", "dtype": 
"float64"}, {"name": "clear cut", "dtype": "float64"}, {"name": "coniferous", "dtype": "float64"}, {"name": "deciduous", "dtype": "float64"}, {"name": "greenhouse", "dtype": "float64"}, {"name": "herbaceous vegetation", "dtype": "float64"}, {"name": "impervious surface", "dtype": "float64"}, {"name": "ligneous", "dtype": "float64"}, {"name": "mixed", "dtype": "float64"}, {"name": "other", "dtype": "float64"}, {"name": "pervious surface", "dtype": "float64"}, {"name": "plowed land", "dtype": "float64"}, {"name": "snow", "dtype": "float64"}, {"name": "swimming pool", "dtype": "float64"}, {"name": "vineyard", "dtype": "float64"}, {"name": "water", "dtype": "float64"}]}]}, {"name": "prompt", "dtype": "string"}, {"name": "clip_label", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 26842989610.0, "num_examples": 61712}], "download_size": 26786210818, "dataset_size": 26842989610.0}, "tags": ["IGN"], "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
|
2023-11-24T10:45:49+00:00
|
38c62f6ee905f7b27ee9f8be1e72f06c2b3994c0
|
tlhcelik/test_dataset
|
[
"task_categories:text-classification",
"size_categories:1K<n<10K",
"language:en",
"language:tr",
"license:wtfpl",
"legal",
"region:us"
] |
2023-04-27T12:00:50+00:00
|
{"language": ["en", "tr"], "license": "wtfpl", "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"], "tags": ["legal"]}
|
2023-04-27T12:01:32+00:00
|
|
21521bc176430f4dfeabff620b4e0993ab7bb799
|
mlnchk/CL_nature
|
[
"license:mit",
"region:us"
] |
2023-04-27T12:08:17+00:00
|
{"license": "mit"}
|
2023-04-27T12:18:26+00:00
|
|
21e19561b7be01f58d8c060f477f264da349ff87
|
# Dataset Card for "train-model1-dataset"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
UchihaMadara/train-model1-dataset
|
[
"region:us"
] |
2023-04-27T12:20:48+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "sentiments", "sequence": "int64"}, {"name": "food", "sequence": "int64"}, {"name": "service", "sequence": "int64"}, {"name": "price", "sequence": "int64"}, {"name": "ambience", "sequence": "int64"}, {"name": "anecdotes/miscellaneous", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 2367682, "num_examples": 3043}], "download_size": 213398, "dataset_size": 2367682}}
|
2023-04-27T12:20:50+00:00
|
f1b4e48c408b1831bde69ea2f08f481d01001c42
|
# Dataset Card for "validate-dataset-model1"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
UchihaMadara/validate-dataset-model1
|
[
"region:us"
] |
2023-04-27T12:23:54+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "sentiments", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 98465, "num_examples": 800}], "download_size": 44572, "dataset_size": 98465}}
|
2023-04-27T12:23:56+00:00
|
fe836dc3fc9dd65ed4308ff20c63c32cb642813b
|
# Dataset Card for "grabo"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
qmeeus/grabo
|
[
"region:us"
] |
2023-04-27T12:28:08+00:00
|
{"dataset_info": {"features": [{"name": "uttid", "dtype": "string"}, {"name": "audio", "dtype": "audio"}, {"name": "text", "dtype": "string"}, {"name": "intent", "dtype": "string"}], "splits": [{"name": "pp10", "num_bytes": 228028321.0, "num_examples": 540}, {"name": "pp11", "num_bytes": 211504118.0, "num_examples": 541}, {"name": "pp12", "num_bytes": 322474928.0, "num_examples": 540}, {"name": "pp2", "num_bytes": 233171644.0, "num_examples": 541}, {"name": "pp3", "num_bytes": 300904068.0, "num_examples": 540}, {"name": "pp4", "num_bytes": 199806236.0, "num_examples": 540}, {"name": "pp5", "num_bytes": 229715190.0, "num_examples": 540}, {"name": "pp6", "num_bytes": 371927769.0, "num_examples": 574}, {"name": "pp7", "num_bytes": 188155834.0, "num_examples": 571}, {"name": "pp8", "num_bytes": 236232429.0, "num_examples": 540}, {"name": "pp9", "num_bytes": 302685363.0, "num_examples": 540}], "download_size": 2694175888, "dataset_size": 2824605900.0}}
|
2023-04-27T12:35:47+00:00
|
d0f696c2b56690e3be5b7028612f0b6011e074fe
|
# Dataset Card for "kick_starter_funding"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
james-burton/kick_starter_funding
|
[
"region:us"
] |
2023-04-27T12:45:10+00:00
|
{"dataset_info": {"features": [{"name": "name", "dtype": "string"}, {"name": "desc", "dtype": "string"}, {"name": "goal", "dtype": "float64"}, {"name": "keywords", "dtype": "string"}, {"name": "disable_communication", "dtype": "bool"}, {"name": "country", "dtype": "string"}, {"name": "currency", "dtype": "string"}, {"name": "deadline", "dtype": "int64"}, {"name": "created_at", "dtype": "int64"}, {"name": "final_status", "dtype": {"class_label": {"names": {"0": "0", "1": "1"}}}}], "splits": [{"name": "train", "num_bytes": 20192646.60708423, "num_examples": 73526}, {"name": "validation", "num_bytes": 3563634.392915771, "num_examples": 12976}, {"name": "test", "num_bytes": 5935728, "num_examples": 21626}], "download_size": 0, "dataset_size": 29692009.0}}
|
2023-04-27T13:24:15+00:00
|
464e6e1b037cfbe322fa9a9fbf6021aaf2798ff6
|
houck2040/research_news
|
[
"license:mit",
"region:us"
] |
2023-04-27T12:53:32+00:00
|
{"license": "mit"}
|
2023-04-27T12:54:01+00:00
|
|
ef4b6ed5d41535176da82803717d318b0d471fc3
|
nor-violet7/vits_chinese
|
[
"license:openrail",
"region:us"
] |
2023-04-27T13:03:40+00:00
|
{"license": "openrail"}
|
2023-04-27T13:03:40+00:00
|
|
aae39191a08244976e9da0df6305981ecfb2e61e
|
# Dataset Card for "jigsaw_unintended_bias100K"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
james-burton/jigsaw_unintended_bias100K
|
[
"region:us"
] |
2023-04-27T13:05:03+00:00
|
{"dataset_info": {"features": [{"name": "comment_text", "dtype": "string"}, {"name": "asian", "dtype": "float64"}, {"name": "atheist", "dtype": "float64"}, {"name": "bisexual", "dtype": "float64"}, {"name": "black", "dtype": "float64"}, {"name": "buddhist", "dtype": "float64"}, {"name": "christian", "dtype": "float64"}, {"name": "female", "dtype": "float64"}, {"name": "heterosexual", "dtype": "float64"}, {"name": "hindu", "dtype": "float64"}, {"name": "homosexual_gay_or_lesbian", "dtype": "float64"}, {"name": "intellectual_or_learning_disability", "dtype": "float64"}, {"name": "jewish", "dtype": "float64"}, {"name": "latino", "dtype": "float64"}, {"name": "male", "dtype": "float64"}, {"name": "muslim", "dtype": "float64"}, {"name": "other_disability", "dtype": "float64"}, {"name": "other_gender", "dtype": "float64"}, {"name": "other_race_or_ethnicity", "dtype": "float64"}, {"name": "other_religion", "dtype": "float64"}, {"name": "other_sexual_orientation", "dtype": "float64"}, {"name": "physical_disability", "dtype": "float64"}, {"name": "psychiatric_or_mental_illness", "dtype": "float64"}, {"name": "transgender", "dtype": "float64"}, {"name": "white", "dtype": "float64"}, {"name": "funny", "dtype": "int64"}, {"name": "wow", "dtype": "int64"}, {"name": "sad", "dtype": "int64"}, {"name": "likes", "dtype": "int64"}, {"name": "disagree", "dtype": "int64"}, {"name": "target", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 46979913.1, "num_examples": 85000}, {"name": "validation", "num_bytes": 8290572.9, "num_examples": 15000}, {"name": "test", "num_bytes": 13825536, "num_examples": 25000}], "download_size": 29047323, "dataset_size": 69096022.0}}
|
2023-04-27T13:05:31+00:00
|
f9a27f2578d6ae44293e6397dc079a0445ecfc48
|
foilfoilfoil/PersonalDiscordDialouges
|
[
"license:unknown",
"region:us"
] |
2023-04-27T13:12:23+00:00
|
{"license": "unknown"}
|
2023-04-27T13:12:23+00:00
|
|
1322cc008a02ccd289a77f65ba301bda55805c23
|
# Dataset Card for "CuPL_DaVinci_captioned_CUB2002011_test"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
anjunhu/CuPL_DaVinci_captioned_CUB2002011_test
|
[
"region:us"
] |
2023-04-27T14:03:43+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 158722958.75, "num_examples": 5794}], "download_size": 158383326, "dataset_size": 158722958.75}}
|
2023-04-27T14:04:02+00:00
|
04ca5e41940c58294ce26bcd86d4c0e2bc621453
|
# Dataset Card for "VQAv2_validation_no_image_google_flan_t5_xxl_mode_A_T_D_PNP_FILTER_C_Q_rices_ns_100"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
CVasNLPExperiments/VQAv2_validation_no_image_google_flan_t5_xxl_mode_A_T_D_PNP_FILTER_C_Q_rices_ns_100
|
[
"region:us"
] |
2023-04-27T14:22:57+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "prompt", "sequence": "string"}, {"name": "question", "dtype": "string"}, {"name": "true_label", "sequence": "string"}, {"name": "prediction", "dtype": "string"}], "splits": [{"name": "fewshot_0_clip_tags_LAION_ViT_H_14_2B_with_openai_Attributes_LAION_ViT_H_14_2B_descriptors_text_davinci_003_full_DETA_detections_deta_swin_large_o365_coco_classes_caption_module_random_", "num_bytes": 1021137, "num_examples": 100}], "download_size": 97033, "dataset_size": 1021137}}
|
2023-05-04T06:00:34+00:00
|
0036824302c7daffe97d68417b0f50a4eafb583d
|
# Dataset Card for "thai_ser"
[ORIGINAL DATASET HERE](https://github.com/vistec-AI/dataset-releases/releases/tag/v1)
The AI Research Institute of Thailand (AIResearch), a collaboration between the Vidyasirimedhi Institute of Science and Technology (VISTEC) and the Digital Economy Promotion Agency (depa), in cooperation with the Department of Computer Engineering (Faculty of Engineering) and the Department of Dramatic Arts (Faculty of Arts) at Chulalongkorn University, publishes an open Thai speech emotion recognition dataset, THAI SER, with sponsorship from Advanced Info Services Public Company Limited (AIS).
This dataset consists of 5 main emotions assigned to actors: Neutral, Anger, Happiness, Sadness, and Frustration. The recordings were 41 hours, 36 minutes long (27,854 utterances), and were performed by 200 professional actors (112 female, 88 male) and directed by students, former alumni, and professors from the Faculty of Arts, Chulalongkorn University.
THAI SER contains 100 recordings separated into two main categories: Studio and Zoom. Studio recordings consist of two studio environments: Studio A, a controlled studio room with soundproof walls, and Studio B, a normal room without soundproofing or noise control. The recording environments can be summarized as follows:
```
StudioA (noise controlled, soundproof wall)
└─ studio001
└─ studio002
...
└─ studio018
StudioB (Normal room without soundproof wall)
└─ studio019
└─ studio020
...
└─ studio080
Zoom (Recorded online via Zoom and Zencastr)
└─ zoom001
└─ zoom002
...
└─ zoom020
```
Each recording is separated into two sessions: Script Session and Improvisation Session.
To map each utterance to an emotion, we use the majority vote of answers from 3-8 annotators, collected via crowdsourcing (wang.in.th).
Script session
In the script session, the actor was assigned three sentences:
```
sentence 1: พรุ่งนี้มันวันหยุดราชการนะรู้รึยัง หยุดยาวด้วย
(Do you know tomorrow is a public holiday and it's the long one.)
sentence 2: อ่านหนังสือพิมพ์วันนี้รึยัง รู้ไหมเรื่องนั้นกลายเป็นข่าวใหญ่ไปแล้ว
(Have you read today's newspaper, that story was the topliner.)
sentence 3: ก่อนหน้านี้ก็ยังเห็นทำตัวปกติดี ใครจะไปรู้หล่ะ ว่าเค้าคิดแบบนั้น
(He/She was acting normal recently, who would thought that he/she would think like that.)
```
The actor was asked to speak each sentence two times for each emotion with two emotional intensity levels (normal, strong), with an additional neutral expression.
Improvisation session
For the improvisation session, two actors were asked to improvise according to a provided emotion and scenario.
| Scenario | Actor A | Actor B |
| --- | --- | --- |
| 1 | (Neutral) A hotel receptionist trying to explain and serve the customer | (Angry) An angry customer who is dissatisfied with the hotel services |
| 2 | (Happy) A person excitedly talking with B about his/her marriage plan | (Happy) A person happily talking with A and helping him/her plan the ceremony |
| 3 | (Sad) A patient feeling depressed | (Neutral) A doctor attempting to talk with A neutrally |
| 4 | (Angry) A furious boss talking with the employee | (Frustrated) A frustrated person attempting to argue with his/her boss |
| 5 | (Frustrated) A person frustratedly talking about another person's actions | (Sad) A person feeling guilty and sad about his/her actions |
| 6 | (Happy) A happy hotel staff member | (Happy) A happy customer |
| 7 | (Sad) A sad person who feels insecure about the upcoming marriage | (Frustrated) A person frustrated about the other person's insecurity |
| 8 | (Frustrated) A frustrated patient | (Neutral) A doctor talking with the patient |
| 9 | (Neutral) A worker assigned to tell his/her co-worker about the company's bad situation | (Sad) An employee feeling sad after listening |
| 10 | (Angry) A person raging about another person's behavior | (Angry) A person who feels blamed by the other person |
| 11 | (Frustrated) A director unsatisfied with a co-worker | (Frustrated) A frustrated person trying their best at the job |
| 12 | (Happy) A person who gets a new job or promotion | (Sad) A person who is desperate in his/her job |
| 13 | (Neutral) A patient inquiring for information | (Happy) A happy doctor giving his/her patient more information |
| 14 | (Angry) A person upset with his/her work | (Neutral) A calm friend who listens to another person's problem |
| 15 | (Sad) A person sadly telling another person about a relationship | (Angry) A person who feels angry after listening to another person's bad relationship |
File naming convention
Each file has a unique filename, provided in .flac format with a sample rate of about 44.1 kHz. The filename consists of a 5- to 6-part identifier (e.g., s002_clip_actor003_impro1_1.flac, s002_clip_actor003_script1_1_1a.flac). These identifiers define the stimulus characteristics:
File Directory Management
```
studio (e.g., studio1-10)
└─ <studio-num> (studio1, studio2, ...)
   └─ <mic-type> (con, clip, middle)
      └─ <audio-file> (.flac)
zoom (e.g., zoom1-10)
└─ <zoom-num> (zoom1, zoom2, ...)
   └─ <mic-type> (mic)
      └─ <audio-file> (.flac)
```
Filename identifiers
```
Recording ID (s = studio recording, z = zoom recording)
Number of recording (e.g., s001, z001)
Microphone type (clip, con, middle, mic)
    Zoom recording session
        mic = An actor's microphone-of-choice
    Studio recording session
        con = Condenser microphone (Cardioid polar patterns) which was placed 0.5m from the actor setting
        clip = Lavalier microphone (Omni-directional patterns) attached to the actor’s shirt collar
        middle = Condenser microphone (Figure-8 polar patterns) which was placed between actors
Actor ID (actor001 to actor200: Odd-numbered actors are Actor A, even-numbered actors are Actor B in improvisation session).
Session ID (impro = Improvisation Session, script = Script Session)
    Script Session (e.g., _script1_1_1a)
        Sentence ID (script1-script3)
        Repetition (1 = 1st repetition, 2 = 2nd repetition)
        Emotion (1 = Neutral, 2 = Angry, 3 = Happy, 4 = Sad, 5 = Frustrated)
        Emotional intensity (a = Normal, b = Strong)
    Improvisation Session (e.g., _impro1_1)
        Scenario ID (impro1-15)
        Utterance no. (e.g., _impro1_1 , _impro1_2)
Filename example: s002_clip_actor003_impro1_1.flac
    Studio recording number 2 (s002)
    Recording by Lavalier microphone (clip)
    3rd Actor (actor003)
    Improvisation session, scenario 1 (impro1)
    1st utterance of scenario recording (1)
Other Files
    emotion_label.json - a dictionary for recording id, assigned emotion (assigned_emo), majority emotion (emotion_emo), annotated emotions from crowdsourcing (annotated), and majority agreement score (agreement)
    actor_demography.json - a dictionary that contains information about the age and sex of actors.
```
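To illustrate the naming convention above, here is a small parsing sketch. The helper name and exact field layout are assumptions inferred from the examples in this card, not an official tool:
```python
# Hedged sketch: parses filenames following the convention described above.
EMOTIONS = {"1": "Neutral", "2": "Angry", "3": "Happy", "4": "Sad", "5": "Frustrated"}

def parse_thai_ser_filename(name: str) -> dict:
    stem = name.removesuffix(".flac")
    recording, mic, actor, *rest = stem.split("_")
    info = {"recording": recording, "mic": mic, "actor": actor}
    if rest[0].startswith("impro"):
        # e.g. impro1_1 -> scenario impro1, utterance 1
        info.update(session="improvisation", scenario=rest[0], utterance=rest[1])
    else:
        # e.g. script1_1_1a -> sentence script1, repetition 1, emotion 1 (Neutral), intensity a (normal)
        sentence, repetition, emo_intensity = rest
        info.update(session="script", sentence=sentence, repetition=repetition,
                    emotion=EMOTIONS.get(emo_intensity[0], "?"),
                    intensity={"a": "normal", "b": "strong"}.get(emo_intensity[1], "?"))
    return info

print(parse_thai_ser_filename("s002_clip_actor003_impro1_1.flac"))
print(parse_thai_ser_filename("s002_clip_actor003_script1_1_1a.flac"))
```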
Version
Version 1 (26 March 2021): The THAI SER Thai speech emotion recognition dataset contains 100 recordings (80 studio and 20 Zoom), totaling 41 hours 36 minutes and 27,854 labeled utterances.
Dataset statistics
| Recording environment | Session | Number of utterances | Duration (hrs) |
| --- | --- | --- | --- |
| Zoom (20) | Script | 2,398 | 4.0279 |
| Zoom (20) | Improvisation | 3,606 | 5.8860 |
| Studio (80) | Script | 9,582 | 13.6903 |
| Studio (80) | Improvisation | 12,268 | 18.0072 |
| Total (100) | | 27,854 | 41.6114 |
Dataset sponsorship and license
Advanced Info Services Public Company Limited
This work is published under a Creative Commons BY-SA 4.0 license.
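A minimal loading sketch for the Hub copy in this listing (`awghuku/thai_ser`); the feature names follow the metadata shown here, and this is not an official snippet:
```python
from datasets import load_dataset

# Assumed repo id and feature names, taken from this listing's metadata.
ds = load_dataset("awghuku/thai_ser", split="train")

example = ds[0]
label_names = ds.features["label"].names  # e.g. Anger, Frustration, Happiness, Neutral, Sadness
print(label_names[example["label"]])
print(example["audio"]["sampling_rate"])
```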
|
awghuku/thai_ser
|
[
"region:us"
] |
2023-04-27T14:40:02+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "Anger", "1": "Frustration", "2": "Happiness", "3": "Neutral", "4": "Sadness"}}}}], "splits": [{"name": "train", "num_bytes": 2977334910.978, "num_examples": 14231}], "download_size": 2883049328, "dataset_size": 2977334910.978}}
|
2023-04-27T15:26:40+00:00
|
35c2acf5531872b556e960597f92eba7beccce2a
|
# Dataset Card for "wine_reviews"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
james-burton/wine_reviews
|
[
"region:us"
] |
2023-04-27T14:56:24+00:00
|
{"dataset_info": {"features": [{"name": "country", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "points", "dtype": "int64"}, {"name": "price", "dtype": "float64"}, {"name": "province", "dtype": "string"}, {"name": "variety", "dtype": {"class_label": {"names": {"0": "Bordeaux-style Red Blend", "1": "Bordeaux-style White Blend", "2": "Cabernet Franc", "3": "Cabernet Sauvignon", "4": "Champagne Blend", "5": "Chardonnay", "6": "Gamay", "7": "Gew\u00fcrztraminer", "8": "Gr\u00fcner Veltliner", "9": "Malbec", "10": "Merlot", "11": "Nebbiolo", "12": "Pinot Grigio", "13": "Pinot Gris", "14": "Pinot Noir", "15": "Portuguese Red", "16": "Portuguese White", "17": "Red Blend", "18": "Rh\u00f4ne-style Red Blend", "19": "Riesling", "20": "Ros\u00e9", "21": "Sangiovese", "22": "Sauvignon Blanc", "23": "Shiraz", "24": "Sparkling Blend", "25": "Syrah", "26": "Tempranillo", "27": "Viognier", "28": "White Blend", "29": "Zinfandel"}}}}], "splits": [{"name": "train", "num_bytes": 21014061.962412182, "num_examples": 71504}, {"name": "validation", "num_bytes": 3708554.0375878178, "num_examples": 12619}, {"name": "test", "num_bytes": 6181444, "num_examples": 21031}], "download_size": 16227253, "dataset_size": 30904060.0}}
|
2023-04-27T14:56:36+00:00
|
fe0f8bfe4d254d2563abd6a85f1d1e915b61f035
|
# Dataset Card for "OxfordFlowers_test_google_flan_t5_xl_mode_T_SPECIFIC_A_ns_1000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
CVasNLPExperiments/OxfordFlowers_test_google_flan_t5_xl_mode_T_SPECIFIC_A_ns_1000
|
[
"region:us"
] |
2023-04-27T15:18:34+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "prompt", "dtype": "string"}, {"name": "true_label", "dtype": "string"}, {"name": "prediction", "dtype": "string"}], "splits": [{"name": "fewshot_0_clip_tags_ViT_L_14_Attributes_ViT_L_14_text_davinci_003_full_clip_tags_ViT_L_14_simple_specific_rices", "num_bytes": 444513, "num_examples": 1000}], "download_size": 50458, "dataset_size": 444513}}
|
2023-04-27T15:18:36+00:00
|
18d753af546ac1e3b9cf3c9149b5a77fcfc53e38
|
[MLQA (MultiLingual Question Answering)](https://github.com/facebookresearch/mlqa) Chinese-English bilingual question-answering dataset: a version of the original MLQA dataset converted into Taiwan Traditional Chinese, with the matching items of the Chinese and English versions merged for convenient use with bilingual language models. (Acknowledgments: [BYVoid/OpenCC](https://github.com/BYVoid/OpenCC), [vinta/pangu.js](https://github.com/vinta/pangu.js))
It is divided into two splits, `dev` and `test`, with 302 and 2,986 items respectively.
Example:
```json
[
{
"title": {
"en": "Curling at the 2014 Winter Olympics",
"zh_tw": "2014 年冬季奧林匹克運動會冰壺比賽"
},
"paragraphs": [
{
"context": {
"en": "Qualification to the curling tournaments at the Winter Olympics was determined through two methods. Nations could qualify teams by earning qualification points from performances at the 2012 and 2013 World Curling Championships. Teams could also qualify through an Olympic qualification event which was held in the autumn of 2013. Seven nations qualified teams via World Championship qualification points, while two nations qualified through the qualification event. As host nation, Russia qualified teams automatically, thus making a total of ten teams per gender in the curling tournaments.",
"zh_tw": "本屆冬奧會冰壺比賽參加資格有兩種辦法可以取得。各國家或地區可以透過 2012 年和 2013 年的世界冰壺錦標賽,也可以透過 2013 年 12 月舉辦的一次冬奧會資格賽來取得資格。七個國家透過兩屆世錦賽積分之和來獲得資格,兩個國家則透過冬奧會資格賽。作為主辦國,俄羅斯自動獲得參賽資格,這樣就確定了冬奧會冰壺比賽的男女各十支參賽隊伍。"
},
"qas": [
{
"id": "b08184972e38a79c47d01614aa08505bb3c9b680",
"question": {
"zh_tw": "俄羅斯有多少隊獲得參賽資格?",
"en": "How many teams did Russia qualify for?"
},
"answers": {
"en": [
{
"text": "ten teams",
"answer_start": 543
}
],
"zh_tw": [
{
"text": "十支",
"answer_start": 161
}
]
}
}
]
}
]
}
]
```
For all other information, see: https://github.com/facebookresearch/mlqa .
## Original dataset
https://github.com/facebookresearch/mlqa , taking the `context-zh-question-zh`, `context-zh-question-en`, and `context-en-question-zh` files from each of the `dev` and `test` splits, six files in total.
## Conversion process
1. Use [OpenCC](https://github.com/BYVoid/OpenCC) with the `s2twp.json` configuration to convert Simplified Chinese into Taiwan Traditional Chinese with common Taiwanese phrasing.
2. Use the Python version of [pangu.js](https://github.com/vinta/pangu.js) to insert spaces between Chinese and English (full-width and half-width) text.
3. Merge the matching items of the Chinese and English datasets.
For details of the conversion process, see: https://github.com/zetavg/LLM-Research/blob/bba5ff7/MLQA_Dataset_Converter_(en_zh_tw).ipynb .
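For illustration, a minimal sketch of steps 1-2 on a single string (assuming the `opencc-python-reimplemented` and `pangu` Python packages; this is not the notebook's exact code):
```python
from opencc import OpenCC  # pip install opencc-python-reimplemented
import pangu               # pip install pangu

# Step 1: Simplified Chinese -> Taiwan Traditional Chinese with Taiwanese phrasing (s2twp).
cc = OpenCC("s2twp")

def convert(text: str) -> str:
    # Step 2: insert spaces between CJK and half-width (Latin/digit) characters.
    return pangu.spacing_text(cc.convert(text))

# Demo input constructed from the sample above (simplified form).
print(convert("各国家或地区可以通过2012年和2013年的世界冰壶锦标赛取得资格。"))
```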
## Known issues
* For some items, the `title`, the `context` of a `paragraph`, the question, or the answer may be missing in one of the two languages.
* Some questions and answers may contain misunderstandings or ambiguities, for example the question "俄羅斯有多少隊獲得參賽資格?" ("How many teams did Russia qualify for?") and its answer in the "Curling at the 2014 Winter Olympics" example shown above.
* The `context` of a `paragraph` can differ greatly in length and coverage between the two language versions. For example, in the development split, for the item whose `title` is "Adobe Photoshop":
  * `zh_tw` has only two sentences: 「Adobe Photoshop,簡稱 “PS”,是一個由 Adobe 開發和發行的影象處理軟體。該軟體釋出在 Windows 和 Mac OS 上。」
  * while `en` is a full paragraph: “Adobe Photoshop is a raster graphics editor developed and published by Adobe Inc. for Windows and macOS. It was originally created in 1988 by Thomas and John Knoll. Since then, this software has become the industry standard not only in raster graphics editing, but in digital art as a whole. … (remaining 127 words omitted)”
|
zetavg/mlqa_en_zh_tw
|
[
"task_categories:question-answering",
"task_categories:translation",
"size_categories:1K<n<10K",
"language:zh",
"language:en",
"license:cc-by-3.0",
"region:us"
] |
2023-04-27T15:39:10+00:00
|
{"language": ["zh", "en"], "license": "cc-by-3.0", "size_categories": ["1K<n<10K"], "task_categories": ["question-answering", "translation"], "pretty_name": "MLQA en-zh_tw"}
|
2023-04-27T16:32:33+00:00
|
de6653eb0a69e325d85005ad0e5d7531fa5eb1d2
|
# Dataset Card for "test_builder"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
polinaeterna/test_builder
|
[
"region:us"
] |
2023-04-27T16:18:09+00:00
|
{"builder_config": [{"config_name": "custom", "data_files": [{"split": "train", "pattern": "custom/train-*"}, {"split": "random", "pattern": "custom/random-*"}]}, {"config_name": "default", "data_files": [{"split": "train", "pattern": "data/train-*"}, {"split": "random", "pattern": "data/random-*"}]}], "dataset_info": [{"config_name": "custom", "features": [{"name": "x", "dtype": "int64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1600, "num_examples": 100}, {"name": "random", "num_bytes": 160, "num_examples": 10}], "download_size": 0, "dataset_size": 1760}, {"config_name": "default", "features": [{"name": "x", "dtype": "int64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1600, "num_examples": 100}, {"name": "random", "num_bytes": 800, "num_examples": 50}], "download_size": 0, "dataset_size": 2400}]}
|
2023-04-27T16:50:04+00:00
|
ed3381a23d9328b02054456d8ca5bdc157f617d5
|
merve/turkish_instructions
|
[
"license:apache-2.0",
"region:us"
] |
2023-04-27T16:20:39+00:00
|
{"license": "apache-2.0"}
|
2023-04-27T16:21:11+00:00
|
|
b1ebb960c697d4e3edea64b0ec5f0d29c4f71b49
|
# Dataset Card for "monet-joe/cv_backbones"
This repository consolidates the collection of backbone networks for pre-trained computer vision models available on the PyTorch official website. It mainly includes various Convolutional Neural Networks (CNNs) and Vision Transformer models pre-trained on the ImageNet1K dataset. The entire collection is divided into two subsets, V1 and V2, encompassing multiple classic and advanced versions of visual models. These pre-trained backbone networks provide users with a robust foundation for transfer learning in tasks such as image recognition, object detection, and image segmentation, and offer researchers and practitioners a convenient way to apply them flexibly in different scenarios.
## Viewer
<https://huggingface.co/spaces/monet-joe/cv-backbones>
### Data Fields
| ver | type | input_size | url |
| :-----------: | :-----------: | :--------------: | :-------------------------------: |
| backbone name | backbone type | input image size | url of pretrained model .pth file |
### Splits
| subsets |
| :--: |
| IMAGENET1K_V1 |
| IMAGENET1K_V2 |
## Maintenance
```bash
git clone [email protected]:datasets/monet-joe/cv_backbones
```
## Usage
```python
from datasets import load_dataset
backbones = load_dataset("monet-joe/cv_backbones")
for weights in backbones["IMAGENET1K_V1"]:
print(weights)
for weights in backbones["IMAGENET1K_V2"]:
print(weights)
```
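Each row's `url` field points at a `.pth` checkpoint, so the weights can also be fetched directly. A minimal sketch, assuming the `ver`, `type`, `input_size`, and `url` fields listed in the Data Fields table above:
```python
import torch
from datasets import load_dataset

backbones = load_dataset("monet-joe/cv_backbones")

# Take one entry and fetch its pretrained weights from the listed URL.
entry = backbones["IMAGENET1K_V1"][0]
print(entry["ver"], entry["type"], entry["input_size"])

state_dict = torch.hub.load_state_dict_from_url(entry["url"], progress=True)
print(f"{entry['ver']}: {len(state_dict)} tensors downloaded")
```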
## Param count
### IMAGENET1K_V1
| Backbone | Params(M) |
| :----------------: | :-------: |
| SqueezeNet1_0 | 1.2 |
| SqueezeNet1_1 | 1.2 |
| ShuffleNet_V2_X0_5 | 1.4 |
| MNASNet0_5 | 2.2 |
| ShuffleNet_V2_X1_0 | 2.3 |
| MobileNet_V3_Small | 2.5 |
| MNASNet0_75 | 3.2 |
| MobileNet_V2 | 3.5 |
| ShuffleNet_V2_X1_5 | 3.5 |
| RegNet_Y_400MF | 4.3 |
| MNASNet1_0 | 4.4 |
| EfficientNet_B0 | 5.3 |
| MobileNet_V3_Large | 5.5 |
| RegNet_X_400MF | 5.5 |
| MNASNet1_3 | 6.3 |
| RegNet_Y_800MF | 6.4 |
| GoogLeNet | 6.6 |
| RegNet_X_800MF | 7.3 |
| ShuffleNet_V2_X2_0 | 7.4 |
| EfficientNet_B1 | 7.8 |
| DenseNet121 | 8 |
| EfficientNet_B2 | 9.1 |
| RegNet_X_1_6GF | 9.2 |
| RegNet_Y_1_6GF | 11.2 |
| ResNet18 | 11.7 |
| EfficientNet_B3 | 12.2 |
| DenseNet169 | 14.1 |
| RegNet_X_3_2GF | 15.3 |
| EfficientNet_B4 | 19.3 |
| RegNet_Y_3_2GF | 19.4 |
| DenseNet201 | 20 |
| EfficientNet_V2_S | 21.5 |
| ResNet34 | 21.8 |
| ResNeXt50_32X4D | 25 |
| ResNet50 | 25.6 |
| Inception_V3 | 27.2 |
| Swin_T | 28.3 |
| Swin_V2_T | 28.4 |
| ConvNeXt_Tiny | 28.6 |
| DenseNet161 | 28.7 |
| EfficientNet_B5 | 30.4 |
| MaxVit_T | 30.9 |
| RegNet_Y_8GF | 39.4 |
| RegNet_X_8GF | 39.6 |
| EfficientNet_B6 | 43 |
| ResNet101 | 44.5 |
| Swin_S | 49.6 |
| Swin_V2_S | 49.7 |
| ConvNeXt_Small | 50.2 |
| EfficientNet_V2_M | 54.1 |
| RegNet_X_16GF | 54.3 |
| ResNet152 | 60.2 |
| AlexNet | 61.1 |
| EfficientNet_B7 | 66.3 |
| Wide_ResNet50_2 | 68.9 |
| ResNeXt101_64X4D | 83.5 |
| RegNet_Y_16GF | 83.6 |
| ViT_B_16 | 86.6 |
| Swin_B | 87.8 |
| Swin_V2_B | 87.9 |
| ViT_B_32 | 88.2 |
| ConvNeXt_Base | 88.6 |
| ResNeXt101_32X8D | 88.8 |
| RegNet_X_32GF | 107.8 |
| EfficientNet_V2_L | 118.5 |
| Wide_ResNet101_2 | 126.9 |
| VGG11_BN | 132.9 |
| VGG11 | 132.9 |
| VGG13 | 133 |
| VGG13_BN | 133.1 |
| VGG16_BN | 138.4 |
| VGG16 | 138.4 |
| VGG19_BN | 143.7 |
| VGG19 | 143.7 |
| RegNet_Y_32GF | 145 |
| ConvNeXt_Large | 197.8 |
| ViT_L_16 | 304.3 |
| ViT_L_32 | 306.5 |
### IMAGENET1K_V2
| Backbone | Params(M) |
| :----------------: | :-------: |
| MobileNet_V2 | 3.5 |
| RegNet_Y_400MF | 4.3 |
| MobileNet_V3_Large | 5.5 |
| RegNet_X_400MF | 5.5 |
| RegNet_Y_800MF | 6.4 |
| RegNet_X_800MF | 7.3 |
| EfficientNet_B1 | 7.8 |
| RegNet_X_1_6GF | 9.2 |
| RegNet_Y_1_6GF | 11.2 |
| RegNet_X_3_2GF | 15.3 |
| RegNet_Y_3_2GF | 19.4 |
| ResNeXt50_32X4D | 25 |
| ResNet50 | 25.6 |
| RegNet_Y_8GF | 39.4 |
| RegNet_X_8GF | 39.6 |
| ResNet101 | 44.5 |
| RegNet_X_16GF | 54.3 |
| ResNet152 | 60.2 |
| Wide_ResNet50_2 | 68.9 |
| RegNet_Y_16GF | 83.6 |
| ResNeXt101_32X8D | 88.8 |
| RegNet_X_32GF | 107.8 |
| Wide_ResNet101_2 | 126.9 |
| RegNet_Y_32GF | 145 |
## Mirror
<https://www.modelscope.cn/datasets/monetjoe/cv_backbones>
## Reference
[1] <https://pytorch.org/vision/main/_modules><br>
[2] <https://pytorch.org/vision/main/models.html>
|
monet-joe/cv_backbones
|
[
"task_categories:image-classification",
"task_categories:feature-extraction",
"size_categories:n<1K",
"language:en",
"license:mit",
"code",
"region:us"
] |
2023-04-27T16:42:10+00:00
|
{"language": ["en"], "license": "mit", "size_categories": ["n<1K"], "task_categories": ["image-classification", "feature-extraction"], "pretty_name": "Vi-Backbones", "tags": ["code"], "viewer": false}
|
2024-01-13T10:15:48+00:00
|
551c27b04dcbc1ba89fed2547cb2eed606b932a3
|
# Dataset Card for "salient_translation_error_detection_preprocessed"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
skrishna/salient_translation_error_detection_preprocessed
|
[
"region:us"
] |
2023-04-27T16:48:10+00:00
|
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "sequence": "string"}, {"name": "multiple_choice_targets", "sequence": "string"}, {"name": "multiple_choice_scores", "sequence": "int32"}, {"name": "idx", "dtype": "int32"}], "splits": [{"name": "train", "num_bytes": 999293, "num_examples": 799}, {"name": "validation", "num_bytes": 250301, "num_examples": 199}], "download_size": 0, "dataset_size": 1249594}}
|
2023-04-28T16:27:33+00:00
|
86fc61c818717da51eaf724c2ee42fbb4bdab1d8
|
# Dataset Card for "VQAv2_validation_no_image_google_flan_t5_xxl_mode_A_T_D_PNP_FILTER_C_Q_rices_ns_1000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
CVasNLPExperiments/VQAv2_validation_no_image_google_flan_t5_xxl_mode_A_T_D_PNP_FILTER_C_Q_rices_ns_1000
|
[
"region:us"
] |
2023-04-27T16:55:40+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "prompt", "sequence": "string"}, {"name": "question", "dtype": "string"}, {"name": "true_label", "sequence": "string"}, {"name": "prediction", "dtype": "string"}], "splits": [{"name": "fewshot_0_clip_tags_LAION_ViT_H_14_2B_with_openai_Attributes_LAION_ViT_H_14_2B_descriptors_text_davinci_003_full_DETA_detections_deta_swin_large_o365_coco_classes_caption_module_random_", "num_bytes": 10660287, "num_examples": 1000}], "download_size": 436996, "dataset_size": 10660287}}
|
2023-05-04T21:42:41+00:00
|
1115d5209290c9ced5b65927d4001b2f2661768c
|
Tauhait/JargonDetection
|
[
"task_categories:token-classification",
"size_categories:10K<n<100K",
"license:afl-3.0",
"region:us"
] |
2023-04-27T16:59:03+00:00
|
{"license": "afl-3.0", "size_categories": ["10K<n<100K"], "task_categories": ["token-classification"]}
|
2023-04-27T17:41:14+00:00
|
|
d263262f1094df07c6e9f4b956b5ee5057eaf632
|
# Dataset Card for "github-code-haskell-file"
Rows: 339k
Download Size: 806M
This dataset is extracted from [github-code-clean](https://huggingface.co/datasets/codeparrot/github-code-clean).
Each row also contains attribute values for my personal analysis project.
12.6% (43k) of the rows have cyclomatic complexity and LOC values of `-1` because [`homplexity`](https://github.com/BlastWind/homplexity) failed to parse the row's `uncommented_code`.
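A minimal sketch of dropping those unparsed rows after loading (assuming the `loc` and `cycloplexity` column names from the dataset schema):
```python
from datasets import load_dataset

ds = load_dataset("blastwind/github-code-haskell-file", split="train")

# Drop the ~12.6% of rows whose complexity metrics are the sentinel value -1
# (the rows homplexity failed to parse).
parsed = ds.filter(lambda row: row["loc"] != -1 and row["cycloplexity"] != -1)
print(len(ds), "->", len(parsed))
```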
|
blastwind/github-code-haskell-file
|
[
"task_categories:text-generation",
"size_categories:100K<n<1M",
"code",
"haskell",
"region:us"
] |
2023-04-27T17:27:56+00:00
|
{"size_categories": ["100K<n<1M"], "task_categories": ["text-generation"], "dataset_info": {"features": [{"name": "code", "dtype": "string"}, {"name": "repo_name", "dtype": "string"}, {"name": "path", "dtype": "string"}, {"name": "license", "dtype": "string"}, {"name": "size", "dtype": "int64"}, {"name": "n_ast_errors", "dtype": "int64"}, {"name": "ast_max_depth", "dtype": "int64"}, {"name": "n_whitespaces", "dtype": "int64"}, {"name": "n_ast_nodes", "dtype": "int64"}, {"name": "n_ast_terminals", "dtype": "int64"}, {"name": "n_ast_nonterminals", "dtype": "int64"}, {"name": "loc", "dtype": "int64"}, {"name": "cycloplexity", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 2024779946, "num_examples": 339895}], "download_size": 805998536, "dataset_size": 2024779946}, "tags": ["code", "haskell"]}
|
2023-05-16T04:09:56+00:00
|
f32f4989813c157063026cc5b1ac1acd6c9047ac
|
# Dataset Card for "counterfact-filtered-gptj6b"
This dataset is a subset of azhx/counterfact-easy; it was filtered with a *heuristic* used to determine whether the fact in each row is actually known by the GPT-J-6B model.
## The heuristic is as follows:
For each prompt in the original counterfact dataset used by ROME, we use GPT-J-6B to generate n=5 completions with a maximum generated length of 30 tokens.
If the answer specified in the dataset appears in the majority of the completions (>=3), we conclude that the model does indeed know this fact.
In practice, we find that many of the prompts in the original dataset are rarely answered accurately by GPT-J-6B. The number of case_ids was filtered from ~21k down to ~3k.
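A minimal sketch of the heuristic described above (the model id, the prompt/answer field names, and any sampling settings beyond n=5 and the 30-token limit are assumptions, not the exact filtering script):
```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6b")
model = AutoModelForCausalLM.from_pretrained(
    "EleutherAI/gpt-j-6b", torch_dtype=torch.float16
).to("cuda")

def model_knows_fact(prompt: str, answer: str, n: int = 5, threshold: int = 3) -> bool:
    """Generate n completions of up to 30 new tokens and keep the fact only if
    the expected answer shows up in at least `threshold` of them."""
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        do_sample=True,
        max_new_tokens=30,
        num_return_sequences=n,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Strip the prompt tokens and decode only the generated continuations.
    completions = tokenizer.batch_decode(
        outputs[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True
    )
    return sum(answer in completion for completion in completions) >= threshold
```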
|
azhx/counterfact-filtered-gptj6b
|
[
"region:us"
] |
2023-04-27T17:28:09+00:00
|
{"dataset_info": {"features": [{"name": "subject", "dtype": "string"}, {"name": "proposition", "dtype": "string"}, {"name": "subject+predicate", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "False", "1": "True"}}}}, {"name": "case_id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 915160.9417906551, "num_examples": 6896}, {"name": "test", "num_bytes": 101655.05820934482, "num_examples": 766}], "download_size": 421630, "dataset_size": 1016816.0}}
|
2023-04-27T17:35:51+00:00
|
ed9f80d872f1dc90c750571b346b82944a74bf75
|
# Dataset Card for "VQAv2Validation_ViT_L_14_A_T_C_D-PNP-FILTER_benchmarks_1000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
LambdaTests/VQAv2Validation_ViT_L_14_A_T_C_D-PNP-FILTER_benchmarks_1000
|
[
"region:us"
] |
2023-04-27T17:36:11+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "response", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 27495, "num_examples": 1000}], "download_size": 0, "dataset_size": 27495}}
|
2023-04-28T01:05:16+00:00
|
0baa1fa729911d3d944cce01f8175378c63304af
|
# Dataset Card for "simpsons_caption"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
megantron/simpsons_caption
|
[
"region:us"
] |
2023-04-27T17:43:34+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 13735625.0, "num_examples": 200}], "download_size": 13637915, "dataset_size": 13735625.0}}
|
2023-04-27T17:50:26+00:00
|
a9732099f27b72176568a8b9fa4c2fb868f903f6
|
The MADIS observation data is stored in Zarr files, which can be opened using the Xarray library.
The observations for a specific hour YYYY-MM-DD HH:00 are stored in the file `madis2020/MM/(mesonet|metar)/YYYYMMDD_HH00.zarr`.
Each Zarr file has the following variables:
* temperature
* dewpoint
* windSpeed
* precip6Hour (available only in the METAR files)
Each variable has a corresponding quality control variable:
* temperatureDD
* dewpointDD
* windSpeedDD
* precip6HourDD
Lastly, the dataset contains two variables, `latitude` and `longitude`, that specify where the observation was taken.
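For example, a single hour of observations can be opened with Xarray (a minimal sketch; the date in the path is illustrative):
```python
import xarray as xr

# Open the METAR observations for 2020-01-15 06:00 UTC (path layout as above).
ds = xr.open_zarr("madis2020/01/metar/20200115_0600.zarr")

print(ds["temperature"])    # observed values
print(ds["temperatureDD"])  # matching quality-control flags
print(ds["latitude"].values, ds["longitude"].values)  # observation locations
```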
|
excarta/madis2020
|
[
"license:cc-by-4.0",
"region:us"
] |
2023-04-27T18:21:00+00:00
|
{"license": "cc-by-4.0"}
|
2023-05-04T16:32:54+00:00
|
178a36e30d2e4bc331227dcf5b8b4929189934bd
|
# Dataset Card for "gpteacher-role-play-chatml"
Data preprocessing pipeline: https://github.com/AlekseyKorshuk/chat-data-pipeline
|
AlekseyKorshuk/gpteacher-role-play-chatml
|
[
"region:us"
] |
2023-04-27T19:08:22+00:00
|
{"dataset_info": {"features": [{"name": "conversation", "list": [{"name": "content", "dtype": "string"}, {"name": "do_train", "dtype": "bool"}, {"name": "role", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 6168190, "num_examples": 9111}], "download_size": 0, "dataset_size": 6168190}}
|
2023-07-24T21:32:56+00:00
|
ed93a6ca4322b873c75b30ed4dd74557fb5ce6c4
|
# Dataset Card for "dolly-chatml"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
AlekseyKorshuk/dolly-chatml
|
[
"region:us"
] |
2023-04-27T19:13:45+00:00
|
{"dataset_info": {"features": [{"name": "conversation", "list": [{"name": "content", "dtype": "string"}, {"name": "do_train", "dtype": "bool"}, {"name": "role", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 12384895, "num_examples": 15011}], "download_size": 7269791, "dataset_size": 12384895}}
|
2023-06-06T15:40:49+00:00
|
7fe6073014c8f35c0bc771f18d32b2ff3e3c3da4
|
Medical Diffusion SAM
This dataset is for medical diffusion SAM, intended for training with ControlNet.
|
birgermoell/medical-diffusion-sam
|
[
"region:us"
] |
2023-04-27T19:20:50+00:00
|
{}
|
2023-04-28T13:51:56+00:00
|
82051101cfdf253729e5f418c38d2d1305ef0a91
|
# Dataset Card for "logo-dataset-v4"
This dataset consists of 803 pairs \\( (x, y) \\), where \\( x \\) is the image and \\( y \\) is the description of the image.
The data have been manually collected and labelled, so the dataset is representative and free of noisy or irrelevant samples.
The logos in the dataset are minimalist, meeting modern design requirements and reflecting the company's industry.
# Disclaimer
This dataset is made available for academic research purposes only. All the images were collected from the Internet, and the copyright belongs to the original owners. If any of the images belong to you and you would like them removed, please inform us and we will try to remove them from the dataset.
|
logo-wizard/modern-logo-dataset
|
[
"task_categories:text-to-image",
"size_categories:n<1K",
"language:en",
"license:cc-by-nc-3.0",
"doi:10.57967/hf/0592",
"region:us"
] |
2023-04-27T19:26:59+00:00
|
{"language": ["en"], "license": "cc-by-nc-3.0", "size_categories": ["n<1K"], "task_categories": ["text-to-image"], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 209598433, "num_examples": 803}], "download_size": 208886058, "dataset_size": 209598433}}
|
2023-05-09T12:40:55+00:00
|
cbbf6c2beb3c0c31d52e411153b2ef684844301e
|
# Dataset Card for "SHP-chatml"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
AlekseyKorshuk/SHP-chatml
|
[
"region:us"
] |
2023-04-27T19:30:44+00:00
|
{"dataset_info": {"features": [{"name": "conversation", "list": [{"name": "content", "dtype": "string"}, {"name": "do_train", "dtype": "bool"}, {"name": "role", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 69458847, "num_examples": 43269}], "download_size": 41755344, "dataset_size": 69458847}}
|
2023-06-05T21:25:20+00:00
|
0f5c257155eddd3fdc861f6a98ab71826c12bcca
|
# Dataset Card for "simpsons_20"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
megantron/simpsons_20
|
[
"region:us"
] |
2023-04-27T19:56:38+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 736033.0, "num_examples": 20}], "download_size": 683136, "dataset_size": 736033.0}}
|
2023-04-27T23:21:29+00:00
|
b6fc23f87a7dfad4cb045191e99f3e79ff73c7b5
|
KyonBS/HigiriKunoichiTsubaki
|
[
"license:openrail",
"region:us"
] |
2023-04-27T20:01:51+00:00
|
{"license": "openrail"}
|
2023-04-27T20:02:44+00:00
|
|
826d1bf23fd07d2fbd820479bb8d06199ffff13b
|
# Dataset Card for "flame_tensors"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
dhmeltzer/flame_tensors
|
[
"region:us"
] |
2023-04-27T20:12:34+00:00
|
{"dataset_info": {"features": [{"name": "label", "dtype": "int64"}, {"name": "pixel_values", "sequence": {"sequence": {"sequence": "float32"}}}], "splits": [{"name": "train", "num_bytes": 19008408672, "num_examples": 31428}, {"name": "validation", "num_bytes": 4752706992, "num_examples": 7858}, {"name": "test", "num_bytes": 5186365800, "num_examples": 8575}], "download_size": 7096810022, "dataset_size": 28947481464}}
|
2023-04-27T20:28:00+00:00
|
8518c15aba31c56d6a9315acd1c2f5e8ab137dc1
|
# Dataset Card for "gpt4all-jazzy-chatml"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
AlekseyKorshuk/gpt4all-jazzy-chatml
|
[
"region:us"
] |
2023-04-27T20:17:42+00:00
|
{"dataset_info": {"features": [{"name": "conversation", "list": [{"name": "content", "dtype": "string"}, {"name": "do_train", "dtype": "bool"}, {"name": "role", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 1484028130, "num_examples": 711126}], "download_size": 768135582, "dataset_size": 1484028130}}
|
2023-06-05T21:33:57+00:00
|
395e140015d2b57227c5af40e26afafa82c08f69
|
A dataset of Wikipedia's most popular articles: an extensive collection of unprocessed text covering a diverse range of topics, including history, science, critical thinking, and mathematics.
This dataset aims to facilitate the pretraining of large language models by providing a vast corpus of informative content.
Its unprocessed format and topical diversity make it well suited to pretraining custom models that can understand and generate natural language text.
|
Celestinian/minimal-wikipedia-corpus-raw
|
[
"license:mit",
"region:us"
] |
2023-04-27T20:22:23+00:00
|
{"license": "mit", "datasetsviewer": {"not_supported": true}}
|
2023-04-29T16:05:03+00:00
|
19ee7c53b3e2749a2c357db07a820bf6734bf192
|
Celestinian/literature-dataset
|
[
"license:unknown",
"region:us"
] |
2023-04-27T20:23:03+00:00
|
{"license": "unknown"}
|
2023-05-02T22:51:01+00:00
|
|
6b1ec82307e58ef0ced77a03a60e693bde6e64e9
|
100,772 texts with their corresponding labels:
- NOT_OFF_HATEFUL_TOXIC: 81,359 texts
- OFF_HATEFUL_TOXIC: 19,413 texts
|
christinacdl/OFF_HATE_TOXIC_ENGLISH
|
[
"task_categories:text-classification",
"size_categories:n<1K",
"language:en",
"license:apache-2.0",
"code",
"region:us"
] |
2023-04-27T20:46:33+00:00
|
{"language": ["en"], "license": "apache-2.0", "size_categories": ["n<1K"], "task_categories": ["text-classification"], "pretty_name": "Offensive_Hateful_Toxic_Dataset", "tags": ["code"]}
|
2023-05-02T18:43:35+00:00
|
03c26e2c0988b39a7de605a70d86d4d9169e9954
|
# :page_with_curl: Spanish Paraphrase Corpora

Manually paraphrased corpus in Spanish
## The Sushi Corpus
This [corpus](https://github.com/GIL-UNAM/SpanishParaphraseCorpora/tree/main/Sushi) is designed to assess the similarity between a pair of texts and to evaluate different similarity measures, both for whole documents and for individual sentences. It is built around a Spanish blog article about **Sushi**. Several volunteers (undergraduate, graduate, and Ph.D. students) were asked to intentionally reformulate or paraphrase this article. The paraphrasing was carried out on two levels, according to the following rules:
<ul>
<li> <b>Low level:</b> Only lexical variation </li>
<li> <b>High level:</b> Lexical, syntactic, textual or discursive organization variation and fusion or separation of sentences.</li>
<li> <b>No Paraphrase:</b> Texts on the same theme and from the same source as the original article, related to sushi.</li>
<li> <b>No Sushi:</b> Texts on a different theme from the original article but with overlapping vocabulary. That is, texts not related to sushi, but with exactly the same vocabulary as the original one. Some volunteers wrote a free text using the same content words as the original.</li>
</ul>
### :pencil: How to cite
If you use the corpus please cite the following articles:
1) Gómez-Adorno H., Bel-Enguix G., Sierra G., Torres-Moreno JM., Martinez R., Serrano P. (2020) Evaluation of Similarity Measures in a Benchmark for Spanish Paraphrasing Detection. In: Martínez-Villaseñor L., Herrera-Alcántara O., Ponce H., Castro-Espinoza F.A. (eds) Advances in Computational Intelligence. MICAI 2020. Lecture Notes in Computer Science, vol 12469. Springer, Cham. https://doi.org/10.1007/978-3-030-60887-3_19
2) Castro, B., Sierra, G., Torres-Moreno, J.M., Da Cunha, I.: El discurso y la semántica como recursos para la detección de similitud textual. In: Proceedings of the III RST Meeting (8th Brazilian Symposium in Information and Human Language Technology, STIL 2011). Brazilian Computer Society, Cuiabá (2011)
## Acknowledgments
The work was done with partial support of CONACYT project A1-S-27780 and UNAM-PAPIIT projects IA401219, TA100520, AG400119.
## License
[CC0 1.0 Universal](https://choosealicense.com/licenses/cc0-1.0/)
|
GIL-UNAM/SpanishParaphraseCorpora
|
[
"task_categories:feature-extraction",
"size_categories:n<1K",
"language:es",
"license:cc0-1.0",
"region:us"
] |
2023-04-27T20:53:11+00:00
|
{"language": ["es"], "license": "cc0-1.0", "size_categories": ["n<1K"], "task_categories": ["feature-extraction"], "pretty_name": "Manually paraphrased corpus in Spanish"}
|
2023-05-17T01:10:11+00:00
|
1c59585e9524df70517856dec8975cad2de6c16d
|
# Dataset Card for "diffusers_sprint_cartoonify_yourself"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mohammadhia/diffusers_sprint_cartoonify_yourself
|
[
"region:us"
] |
2023-04-27T21:16:47+00:00
|
{"dataset_info": {"features": [{"name": "input_image", "dtype": "image"}, {"name": "edit_prompt", "dtype": "string"}, {"name": "edited_image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 29092003.0, "num_examples": 20}], "download_size": 29095136, "dataset_size": 29092003.0}}
|
2023-04-27T21:16:51+00:00
|
a3a8c8615593addcdc0177bc8f24531137a0b7ed
|
In-development dataset to fix some core knowledge/ability tasks in language models.
Data follows the Alpaca format (Instruction-Input-Response) and is formatted as a JSON file.
The JSON file holds a single array of dictionaries; each dictionary has 3 keys: "Instruction", "Input", "Response".
Data is generated using conventional code, and pulls from subsets of pile_v1 when raw text data is needed.
Questions aim to be unambiguous and mostly target foundational knowledge (rather than long instruction chains or trivia).
The project could be dropped at any point.
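A minimal loading sketch (the file name and the record shown in the comment are illustrative, not taken from the dataset):
```python
import json

# Illustrative record shape only; the real file name and contents may differ:
# [{"Instruction": "...", "Input": "...", "Response": "..."}, ...]
with open("violet_salvia.json", encoding="utf-8") as f:
    records = json.load(f)

for record in records:
    print(record["Instruction"], record["Input"], record["Response"])
```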
---
license: cc-by-nc-4.0
---
|
yuzaboto/VioletSalvia
|
[
"region:us"
] |
2023-04-27T21:21:33+00:00
|
{}
|
2023-05-07T21:30:58+00:00
|
5cf00957d727b1cbdb8b15b60d39e4883a3f031e
|
trec-product-search/Product-Search-Images-v0.1
|
[
"task_categories:text-classification",
"size_categories:1M<n<10M",
"language:en",
"license:apache-2.0",
"information retrieval",
"search",
"image search",
"region:us"
] |
2023-04-27T21:23:52+00:00
|
{"language": ["en"], "license": "apache-2.0", "size_categories": ["1M<n<10M"], "task_categories": ["text-classification"], "pretty_name": "TREC Product Search Images", "tags": ["information retrieval", "search", "image search"]}
|
2023-05-21T16:29:09+00:00
|
|
065b83edfbe5094996ddc0c46c95ff6e293480c8
|
# :notebook: Negation and Sentiment Detection on Mexican Spanish Tweets: The T-MexNeg Corpus
In Spanish, there are three basic levels of negation: lexical, morphological, and syntactic. This corpus addresses only syntactic negation. Negative sentences express false states or the nonexistence of the action described in the sentence, and they may also change the sentiment of lexically aligned text. Syntactic negation is produced by an operator word that affects the whole sentence or a section of it; this operator is called the negation cue. Negation cues can be adverbs, prepositions, indefinite pronouns, or conjunctions. In Spanish they usually precede the verb, but they can also appear after it.
The section affected by the negation cue is called the scope. The words specifically reached by it, which can be verbs, nouns, or phrases, are referred to as the event. Therefore, the basic components of a negative sentence are the negation cue, the scope, and the event.
## :page_facing_up: Corpus Description
The T-MexNeg corpus consists of 13,704 tweets written in Mexican Spanish, of which 4,895 contain negation structures.
The corpus is the result of an analysis of sentiment and negation statements embedded in the language employed on social media. This repository includes annotation guidelines along with the corpus, manually annotated with labels of sentiment, negation cue, scope, and event.
Twitter was used as the initial source of the corpus; the tweets are a random subset of a set collected from Mexican users from September 2017 to April 2019.
## :paperclip: Tags Description
Each entry in the corpus consists of a tweet with two components: the content, and the sentiment tag.
Within the content, the annotation identifies three main negation components: Negation Cue, Event, and Scope. It also differentiates among three types of negation cues: Simple Negation (**neg_exp**), Related Negation (**neg_rel**), and False Negation (**no_neg**).
- **neg_exp**: Refers to negation cues that are not linked to other negation cues. Thus, the Scope and the Event are directly related only to this negation.
- **neg_rel**: This label is used for negation cues that are linked to other negation cues in the sentence and depend on them. A related negation does not have an event or scope of its own and is part of the scope of the main negation.
- **no_neg**: This tag is used with negation cues that do not negate anything at the semantic level, as well as with some abbreviations, idiomatic phrases, and discourse markers.
- **event**: The Event labels the word or words that are specifically negated.
- **scope**: This tag corresponds to all words that are affected by the negation.
The general structure of an entry in the corpus would present the tags as follows:
```
<tweet>
<polarity>
'NEGATIVE/POSITIVE/NEUTRAL'
</polarity>
<content>
<neg_structure>
<scope>
<negexp class='simple/related/no_neg'>
</negexp>
<event>
</event>
</scope>
</neg_structure>
</content>
</tweet>
```
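A minimal parsing sketch using Python's standard library (the file name is an assumption; tag and attribute names follow the skeleton above):
```python
import xml.etree.ElementTree as ET

tree = ET.parse("t_mexneg_sample.xml")  # assumed file name
for tweet in tree.getroot().iter("tweet"):
    polarity = (tweet.findtext("polarity") or "").strip()
    for structure in tweet.iter("neg_structure"):
        negexp = structure.find(".//negexp")
        event = structure.find(".//event")
        print(
            polarity,
            negexp.get("class") if negexp is not None else None,
            "".join(event.itertext()).strip() if event is not None else None,
        )
```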
## :pencil: Citing
If you use the corpus please use the following BibTeX:
```
@Article{app11093880,
AUTHOR = {Bel-Enguix, Gemma and Gómez-Adorno, Helena and Pimentel, Alejandro and Ojeda-Trueba, Sergio-Luis and Aguilar-Vizuet, Brian},
TITLE = {Negation Detection on Mexican Spanish Tweets: The T-MexNeg Corpus},
JOURNAL = {Applied Sciences},
VOLUME = {11},
YEAR = {2021},
NUMBER = {9},
ARTICLE-NUMBER = {3880},
URL = {https://www.mdpi.com/2076-3417/11/9/3880},
ISSN = {2076-3417},
DOI = {10.3390/app11093880}
}
```
## Acknowledgments
This resource was funded by CONACyT project CB A1-S-27780 and DGAPA-UNAM PAPIIT grant numbers TA400121 and TA100520.
|
GIL-UNAM/negation_twitter_mexican_spanish
|
[
"region:us"
] |
2023-04-27T21:28:06+00:00
|
{}
|
2023-05-17T14:25:40+00:00
|
0d8a25b780aacd9f1c2f08e0d308114869515a31
|
# Dataset Card for "light-chatml"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
AlekseyKorshuk/light-chatml
|
[
"region:us"
] |
2023-04-27T21:29:36+00:00
|
{"dataset_info": {"features": [{"name": "conversation", "list": [{"name": "content", "dtype": "string"}, {"name": "do_train", "dtype": "bool"}, {"name": "role", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 32192103, "num_examples": 11024}], "download_size": 15589538, "dataset_size": 32192103}}
|
2023-06-05T21:38:42+00:00
|
f4465d368b9b64be2c876be540d6dd4dfbc661c3
|
# AutoTrain Dataset for project: teste
## Dataset Description
This dataset has been automatically processed by AutoTrain for project teste.
### Languages
The BCP-47 code for the dataset's language is pt.
## Dataset Structure
### Data Instances
A sample from this dataset looks as follows:
```json
[
{
"context": "Sherlock Holmes \u00e9 um personagem de fic\u00e7\u00e3o criado pelo escritor brit\u00e2nico Sir Arthur Conan Doyle. Ele \u00e9 um detetive famoso por sua habilidade em resolver mist\u00e9rios e crimes complexos.",
"question": "Pergunta 268: Qual \u00e9 o nome do irm\u00e3o mais velho de Sherlock Holmes que trabalha para o servi\u00e7o secreto brit\u00e2nico?",
"answers.text": [
"Mycroft Holmes"
],
"answers.answer_start": [
0
]
},
{
"context": "Sherlock Holmes \u00e9 um personagem de fic\u00e7\u00e3o criado pelo escritor brit\u00e2nico Sir Arthur Conan Doyle. Ele \u00e9 um detetive famoso por sua habilidade em resolver mist\u00e9rios e crimes complexos.",
"question": "Pergunta 52: Qual \u00e9 o nome do irm\u00e3o mais velho de Sherlock Holmes que trabalha para o servi\u00e7o secreto brit\u00e2nico?",
"answers.text": [
"Mycroft Holmes"
],
"answers.answer_start": [
0
]
}
]
```
### Dataset Fields
The dataset has the following fields (also called "features"):
```json
{
"context": "Value(dtype='string', id=None)",
"question": "Value(dtype='string', id=None)",
"answers.text": "Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)",
"answers.answer_start": "Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None)"
}
```
### Dataset Splits
This dataset is split into a train and validation split. The split sizes are as follows:
| Split name | Num samples |
| ------------ | ------------------- |
| train | 720 |
| valid | 180 |
|
JeanL-0/Questions-ptbr
|
[
"task_categories:question-answering",
"task_categories:text-generation",
"task_categories:text2text-generation",
"size_categories:n<1K",
"language:pt",
"region:us"
] |
2023-04-27T21:51:27+00:00
|
{"language": ["pt"], "size_categories": ["n<1K"], "task_categories": ["question-answering", "text-generation", "text2text-generation"]}
|
2023-05-01T02:42:32+00:00
|
69757942aa3e9f7edd544ad0f8e74c9b54fb1613
|
# Dataset Card for "am_samoa_case_law_text"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
LawInformedAI/am_samoa_case_law
|
[
"region:us"
] |
2023-04-27T21:59:52+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 18933000, "num_examples": 2171}], "download_size": 9873706, "dataset_size": 18933000}}
|
2023-04-27T21:59:55+00:00
|
ed7cdb9523623cbccaf5e854418e864000ac5006
|
# Dataset Card for "soda-chatml"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
AlekseyKorshuk/soda-chatml
|
[
"region:us"
] |
2023-04-27T22:21:01+00:00
|
{"dataset_info": {"features": [{"name": "conversation", "list": [{"name": "content", "dtype": "string"}, {"name": "do_train", "dtype": "bool"}, {"name": "role", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 1405384981, "num_examples": 1486896}], "download_size": 780354430, "dataset_size": 1405384981}}
|
2023-06-05T19:26:31+00:00
|
4e8e4b13633d6c77bb31665b7ffac9fb97ef5081
|
# Dataset Card for "hh-chatml"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
AlekseyKorshuk/hh-chatml
|
[
"region:us"
] |
2023-04-27T22:29:32+00:00
|
{"dataset_info": {"features": [{"name": "conversation", "list": [{"name": "content", "dtype": "string"}, {"name": "do_train", "dtype": "bool"}, {"name": "role", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 183931784, "num_examples": 169352}], "download_size": 92779456, "dataset_size": 183931784}}
|
2023-06-05T19:27:49+00:00
|
202f76bf6975217d1e9924b1db4c6342131b6b3e
|
# Dataset Card for "channel_metadata"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
kenanazam/channel_metadata
|
[
"region:us"
] |
2023-04-27T22:52:03+00:00
|
{"dataset_info": {"features": [{"name": "Channel ID", "dtype": "string"}, {"name": "Title", "dtype": "string"}, {"name": "Time Created", "dtype": "string"}, {"name": "Time Published", "dtype": "string"}, {"name": "Duration", "dtype": "string"}, {"name": "Description", "dtype": "string"}, {"name": "Category", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 33467, "num_examples": 10}], "download_size": 27899, "dataset_size": 33467}}
|
2023-04-28T21:29:53+00:00
|
07d87c811ed7bd912205010e599e79be6636fad0
|
davanstrien/fuego-20230428-001647-941db9
|
[
"fuego",
"region:us"
] |
2023-04-27T23:16:48+00:00
|
{"tags": ["fuego"], "fuego": {"id": "20230428-001647-941db9", "status": "done", "script": "scratchpad.py", "requirements_file": "requirements.txt", "space_id": "davanstrien/fuego-20230428-001647-941db9", "space_hardware": "cpu-basic"}}
|
2023-04-27T23:24:21+00:00
|
|
9e5d8324fed7963babaf4883da61411000e7f4ea
|
# Dataset Card for "simpsons_captions"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
megantron/simpsons_captions
|
[
"region:us"
] |
2023-04-27T23:23:01+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 13735625.0, "num_examples": 200}], "download_size": 13637896, "dataset_size": 13735625.0}}
|
2023-04-27T23:23:12+00:00
|
7b5180877abd6f4a53e95b0be4df46d94a6d284e
|
# Dataset Card for "berlin_state_library_ocr_with_images"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
davanstrien/berlin_state_library_ocr_with_images
|
[
"region:us"
] |
2023-04-27T23:24:15+00:00
|
{"dataset_info": {"features": [{"name": "file name", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "wc", "sequence": "float64"}, {"name": "ppn", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "language_confidence", "dtype": "float64"}, {"name": "publisher", "dtype": "string"}, {"name": "place", "dtype": "string"}, {"name": "date", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "aut", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 695708068.0, "num_examples": 1000}], "download_size": 0, "dataset_size": 695708068.0}}
|
2023-05-26T05:06:13+00:00
|
d6be26fbd5b42c55aaf3a61034371070b639ac82
|
davanstrien/fuego-20230428-002930-572415
|
[
"fuego",
"region:us"
] |
2023-04-27T23:29:31+00:00
|
{"tags": ["fuego"], "fuego": {"id": "20230428-002930-572415", "status": "running", "script": "scratchpad.py", "requirements_file": "requirements.txt", "space_id": "davanstrien/fuego-20230428-002930-572415", "space_hardware": "cpu-basic"}}
|
2023-04-27T23:30:35+00:00
|
|
883f24f473e7e9e5e83f618b54cab5747c149801
|
davanstrien/fuego-20230428-014702-04e0f5
|
[
"fuego",
"region:us"
] |
2023-04-28T00:47:03+00:00
|
{"tags": ["fuego"], "fuego": {"id": "20230428-014702-04e0f5", "status": "running", "script": "scratchpad.py", "requirements_file": "requirements.txt", "space_id": "davanstrien/fuego-20230428-014702-04e0f5", "space_hardware": "cpu-basic"}}
|
2023-04-28T00:48:05+00:00
|
|
d2ec79f243f7418f31190ab2016b185e4b5a83fc
|
radlab/polish-sts-dataset
|
[
"size_categories:1K<n<10K",
"language:pl",
"license:lgpl-3.0",
"sts",
"region:us"
] |
2023-04-28T00:47:31+00:00
|
{"language": ["pl"], "license": "lgpl-3.0", "size_categories": ["1K<n<10K"], "pretty_name": "Polish STS dataset", "tags": ["sts"]}
|
2023-08-20T22:56:56+00:00
|
|
1c6d9e51705099ac54dc608c95707c978dfe7352
|
# Dataset Card for "korquad-aug-valid"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
HSJuan/korquad-aug-valid
|
[
"region:us"
] |
2023-04-28T01:52:44+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answers", "sequence": [{"name": "text", "dtype": "string"}, {"name": "answer_start", "dtype": "int32"}]}, {"name": "pos_aug", "dtype": "string"}, {"name": "neg_aug", "dtype": "string"}], "splits": [{"name": "validation", "num_bytes": 9298037, "num_examples": 5774}], "download_size": 1728913, "dataset_size": 9298037}}
|
2023-04-28T01:52:51+00:00
|
e4e40954afbbc9c7bfb4c8b622b4c7e19d9c105f
|
AlShurbaji/labels
|
[
"license:other",
"region:us"
] |
2023-04-28T01:56:57+00:00
|
{"license": "other"}
|
2023-11-22T11:27:38+00:00
|
|
049bcd5174058cb4e764d93fba13158fbd9290a3
|
# Storyteller intent classification dataset
Data to train an intent classification model for a typical storytelling robot.
It has 5 labels, each with 150 sentences.
Labels:
- summarize
- took_action_and_continue
- other
- start_generating_stories
- exit
|
zxypro/storyteller-bot-intent-classification
|
[
"license:apache-2.0",
"region:us"
] |
2023-04-28T02:04:16+00:00
|
{"license": "apache-2.0"}
|
2023-04-28T02:09:11+00:00
|
8efd0b29b3f34e30cb39757611b27d6261bdb783
|
# Dataset Card for "cs6301project50k"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
jxu9001/cs6301project50k
|
[
"region:us"
] |
2023-04-28T02:11:49+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "expression", "dtype": "string"}, {"name": "img_width", "dtype": "int64"}, {"name": "img_height", "dtype": "int64"}, {"name": "x", "dtype": "float64"}, {"name": "y", "dtype": "float64"}, {"name": "w", "dtype": "float64"}, {"name": "h", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 7128143566.0, "num_examples": 40000}, {"name": "test", "num_bytes": 1723596306.0, "num_examples": 10000}], "download_size": 4714944672, "dataset_size": 8851739872.0}}
|
2023-04-28T07:00:35+00:00
|
37ec951b383991c2b16bcad69b070e403b9b3496
|
tanakaa/test
|
[
"license:other",
"region:us"
] |
2023-04-28T02:36:28+00:00
|
{"license": "other"}
|
2023-07-24T15:24:08+00:00
|