sha
stringlengths
40
40
text
stringlengths
1
13.4M
id
stringlengths
2
117
tags
listlengths
1
7.91k
created_at
stringlengths
25
25
metadata
stringlengths
2
875k
last_modified
stringlengths
25
25
arxiv
listlengths
0
25
languages
listlengths
0
7.91k
tags_str
stringlengths
17
159k
text_str
stringlengths
1
447k
text_lists
listlengths
0
352
processed_texts
listlengths
1
353
tokens_length
listlengths
1
353
input_texts
listlengths
1
40
761ecd7a87104707b2dc00a8860b04849b910e60
# Dataset Card for "translation_4_llama2_with_end_token" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
RikoteMaster/translation_4_llama2_with_end_token
[ "region:us" ]
2023-10-07T14:20:42+00:00
{"dataset_info": {"features": [{"name": "English", "dtype": "string"}, {"name": "Spanish", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 43090372, "num_examples": 118964}], "download_size": 12020346, "dataset_size": 43090372}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-07T14:41:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "translation_4_llama2_with_end_token" More Information needed
[ "# Dataset Card for \"translation_4_llama2_with_end_token\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"translation_4_llama2_with_end_token\"\n\nMore Information needed" ]
[ 6, 25 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"translation_4_llama2_with_end_token\"\n\nMore Information needed" ]
1fde47e465617b3c603bc91c527fd931e01ebbdd
# Dataset Card for "financial_phrasebank_en" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
infCapital/financial_phrasebank_en
[ "region:us" ]
2023-10-07T14:35:00+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2048295, "num_examples": 14780}], "download_size": 1185669, "dataset_size": 2048295}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-07T14:52:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "financial_phrasebank_en" More Information needed
[ "# Dataset Card for \"financial_phrasebank_en\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"financial_phrasebank_en\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"financial_phrasebank_en\"\n\nMore Information needed" ]
e877739418bc46db0695ffe5ef1f25339872e492
# Dataset Card for "pile_small_miniLM" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
nthngdy/pile_small_miniLM
[ "region:us" ]
2023-10-07T14:38:35+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "meta", "struct": [{"name": "perplexity_score", "dtype": "float64"}, {"name": "pile_set_name", "dtype": "string"}]}, {"name": "emb", "sequence": "float32"}], "splits": [{"name": "train", "num_bytes": 760056668, "num_examples": 100000}], "download_size": 545226370, "dataset_size": 760056668}}
2023-10-07T14:39:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "pile_small_miniLM" More Information needed
[ "# Dataset Card for \"pile_small_miniLM\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"pile_small_miniLM\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"pile_small_miniLM\"\n\nMore Information needed" ]
edcc674b1cd200f36c5b0015695f597c08a6a14b
# Dataset Card for "_dataset_20231007_153958" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tr416/_dataset_20231007_153958
[ "region:us" ]
2023-10-07T14:39:58+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 762696.0, "num_examples": 297}, {"name": "test", "num_bytes": 7704.0, "num_examples": 3}], "download_size": 73889, "dataset_size": 770400.0}}
2023-10-07T14:39:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "_dataset_20231007_153958" More Information needed
[ "# Dataset Card for \"_dataset_20231007_153958\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"_dataset_20231007_153958\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"_dataset_20231007_153958\"\n\nMore Information needed" ]
538a60bd77af68559a11d1e5861256593124fa1a
# Dataset Card for "massive_5_lang_DA4_tokenized" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
carnival13/massive_5_lang_DA4_tokenized
[ "region:us" ]
2023-10-07T15:06:59+00:00
{"dataset_info": {"features": [{"name": "pass_label", "dtype": "int64"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 519317955, "num_examples": 705250}], "download_size": 162988938, "dataset_size": 519317955}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-07T15:07:09+00:00
[]
[]
TAGS #region-us
# Dataset Card for "massive_5_lang_DA4_tokenized" More Information needed
[ "# Dataset Card for \"massive_5_lang_DA4_tokenized\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"massive_5_lang_DA4_tokenized\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"massive_5_lang_DA4_tokenized\"\n\nMore Information needed" ]
1e87741d6c7f6cbc7b513dd5763cbad529424508
![License Badge](https://img.shields.io/badge/license-MIT-green) # 🌌 BioGalacticModels Zoo ## **🔭 Overview** ### **🛰️ Space Biology Datasets And Models Hub** The frontier of space biology research is vast, with uncharted territories that hide the secrets of life beyond our blue planet. 🌍 With the increasing need for accurate and reliable methods to understand and decode the effects of space on biology, machine learning, particularly transfer learning, emerges as a promising approach. 🧠 This repository serves as a nexus between space biology and computational methodologies, aimed at harnessing the power of transfer learning for space biology applications. 💡 We present to you a comprehensive database of publicly available biomedical datasets and models that can be used to further space-biology research and discovery. 🌠 ### **🚀 Purpose and Scope** This repository is designed to: 1. **Centralize Resources**: 📚 Provide a curated collection of GeneLab datasets tailored for space biology studies, ranging from whole genome sequencing to DNA methylation. 2. **Promote Transfer Learning**: 🎓 Offer pre-trained models suitable for transfer learning. 3. **Streamline Data Processing**: ⚙️ Offer code samples and scripts for efficient dataset management. 4. **Facilitate Collaboration**: 🤝 Foster collaboration amongst researchers in the field. 5. **Reference Architectures**: 🗺️ Navigate through transfer learning architectures with ease. ### **🎯 Intended Audience** This hub is for: - **Space Biologists**: 🔬 Integrating computational methodologies. - **Data Scientists & Machine Learning Enthusiasts**: 💻 Tackling challenges in space biology. - **Students & Educators**: 📖 Accessing resources for computational space biology. ### **✉️ Contributing and Feedback** We believe in community-driven science. 💖 Your contributions are warmly welcomed! By joining hands, we can venture further into the mysteries of space biology. --- 🛸 Join us in this interstellar journey of melding computation and space biology, steering the future of life in space. ## 📜 Table of Contents - [BioGalactic Models](#🌠-biogalactic-models) - [Datasets](#🧬-datasets) - [Insights On BioGalacticModels Zoo Usage & Exploration](#💭-insights-on-biogalacticmodels-zoo-usage-&-exploration) - [Transfer Learning Model Architectures for Space Biology](#🌐-promising-transfer-learning-model-architectures-for-space-biology) - [Demo: Predicting Viral Host based on Metagenomic Features](#🧪-demo-predicting-viral-host-based-on-metagenomic-features) --- ## 🌠 BioGalactic Models [BioGalactic Models](https://huggingface.co/spaces/Alfaxad/BioGalacticModels) 🌌 is a dedicated Hugging Face space containing a curated collection of Biology & Biochemistry Foundation Models. **Significance to the BioGalactic Model Zoo**: - **Ready-to-use Models**: 🚀 These models are pre-trained, optimized for transfer learning tasks. - **Diverse Applications**: 🎯 Focused on Biology & Biochemistry, catering to space biology. - **Continuous Evolution**: 🔄 As space biology progresses, this space will evolve. **Impacting Space Biology Exploration**: The models provide insights driving our understanding of life in space conditions. These include: - Decoding genomic sequences. - Predicting protein structures and interactions. - Analyzing metabolic pathways in space. --- ## 🧬 Datasets Dive into the curated datasets, specifically tailored for space biology studies. These datasets, coming directly from the vaults of NASA's GeneLab, cover a range of biological investigations relevant to space. ### **Whole Genome Sequencing Datasets** 1. [Microbiome profiling of feces from mice flown on the RR-10 mission](https://osdr.nasa.gov/bio/repo/data/studies/OSD-466) 2. [Metagenome profiling of feces from mice flown on the RR-23 mission](https://osdr.nasa.gov/bio/repo/data/studies/OSD-465) 3. [Whole genome sequencing and assembly of Eukaryotic microbes isolated from ISS environmental surface, Kirovograd region soil, Chernobyl Nuclear Power Plant and Chernobyl Exclusion Zone](https://osdr.nasa.gov/bio/repo/data/studies/OSD-132) 4. [Draft Genome Sequences of novel Agrobacterium genomospecies 3 Associated from the International Space Station](https://osdr.nasa.gov/bio/repo/data/studies/OSD-306) 5. [Metagenomic analysis of feces from mice flown on the RR-6 mission](https://osdr.nasa.gov/bio/repo/data/studies/OSD-249) 6. [Insta-Deep's Multi-species genome dataset](https://huggingface.co/datasets/InstaDeepAI/multi_species_genomes) ### **DNA Methylation Datasets** 1. [Changes in DNA Methylation in Arabidopsis thaliana Plants Exposed Over Multiple Generations to Gamma Radiation](https://osdr.nasa.gov/bio/repo/data/studies/OSD-520) 2. [Characterization of Epigenetic Regulation in an Extraterrestrial Environment: The Arabidopsis Spaceflight Methylome](https://osdr.nasa.gov/bio/repo/data/studies/OSD-217) 3. [Ionizing radiation induces transgenerational effects of DNA methylation in zebrafish](https://osdr.nasa.gov/bio/repo/data/studies/OSD-524) 4. [Methylome Analysis of Arabidopsis Seedlings Exposed to Microgravity](https://osdr.nasa.gov/bio/repo/data/studies/OSD-220) For an exhaustive list of datasets and other resources, explore [NASA's Open Science Data Repository (OSDR)](https://osdr.nasa.gov/bio/repo/search?q=&data_source=cgene,alsda&data_type=study). ## **Bulk Downloading GeneLab Datasets with genelab-utils** ### **Quick Usage Guide** # GeneLab utils Some helper programs for [NASA GeneLab](https://genelab.nasa.gov/), such as `GL-download-GLDS-data` for downloading files from a specific OSD or GLDS ID, and `GL-get-workflow` for downloading workflows used by [GeneLab for processing datasets](https://github.com/nasa/GeneLab_Data_Processing). ## Conda install The genelab-utils package should be installed with conda/mamba. If you are not familiar with conda, you can find an introduction [here](https://astrobiomike.github.io/unix/conda-intro) if wanted, and if you are not familiar with mamba, there is a super-short introduction on that same page [here](https://astrobiomike.github.io/unix/conda-intro#bonus-mamba-no-5) if wanted – it's definitely worth using mamba if you use conda at all :+1: ```bash conda install -c conda-forge -n base mamba mamba create -n genelab-utils -c conda-forge -c bioconda -c defaults -c astrobiomike genelab-utils conda activate genelab-utils ``` All programs are prefixed with `GL-` and have a help menu accessible with `-h`. Version info can be accessed with `GL-version`. ## Some example pages - Programmatically downloading [GLDS data](https://genelab-data.ndc.nasa.gov/genelab/) - [`GL-download-GLDS-data`](https://hackmd.io/@astrobiomike/using-genelab-utils-to-download-GLDS-data) - Downloading GeneLab workflows - [`GL-get-workflow`](https://hackmd.io/@astrobiomike/using-genelab-utils-to-download-workflows) --- ## 💭 Insights On BioGalacticModels Zoo Usage & Exploration ### **1. Preprocessing** For transfer learning these biomedical datasets may require various preprocessing steps depending on their source and format: - **Data Cleaning:** Removing noise and inconsistencies. - **Normalization:** Scaling features to a standard range. - **Data Augmentation:** Especially for image datasets, augmenting data can help improve model robustness. - **Feature Selection/Extraction:** Especially in genomics, where dimensionality can be very high. - **Handling Imbalances:** In some datasets, certain classes may be underrepresented. - **Format Conversion:** Datasets might need to be converted to formats compatible with machine learning frameworks. ### **3. Potential Multimodal Data Combinations for Space Biology Knowledge Gain** Combining different types of datasets, like genomic, proteomic, and transcriptomic data, can provide a holistic view of biological systems. Additionally, integrating imaging data with molecular data can enhance our understanding of spatial-temporal patterns. Multi-modal datasets can help discover patterns or signals that might not be evident when analyzing data types in isolation. #### a. **Genomic & Transcriptomic Data**: - **Why**: While genomic data (like Whole Genome Sequencing) provides the blueprint of life, transcriptomic data offers insights into gene expression under specific conditions. Combining both can help in understanding the genetic basis of responses to space environments and how genes are expressed differently in space. #### b. **Proteomic & Metabolomic Data**: - **Why**: Proteomic data tells us about the proteins produced, while metabolomic data provides information on the small molecules in an organism. Together, they can offer insights into the functional state of cells in space, revealing which proteins are active and what metabolic pathways they're influencing. #### c. **Transcriptomic & Metabolomic Data**: - **Why**: This combination can correlate gene expression with metabolic changes. It can be particularly insightful to understand how gene expression changes influence metabolic responses in space conditions. #### d. **Genomic & Phenotypic Data**: - **Why**: Connecting the genetic makeup with observable traits (phenotypes) can help in predicting how specific genetic variations might influence an organism's ability to thrive in space. #### e. **Imaging & Transcriptomic Data**: - **Why**: While transcriptomic data reveals gene expression, imaging (like MRI or microscopy) can show structural or functional changes in tissues or cells. Combined, they can link gene expression patterns with visual manifestations. #### f. **Epigenomic & Transcriptomic Data**: - **Why**: Epigenomic data, like DNA Methylation, reveals changes in gene activity not caused by DNA sequence changes. By combining it with transcriptomic data, one can understand how space conditions might epigenetically influence gene expression. #### g. **Genomic & Proteomic Data**: - **Why**: This combination can be used to understand the translation of genes to proteins under space conditions, offering insights into post-transcriptional modifications in space. #### h. **Environmental Data & Any Biological Data**: - **Why**: Combining data on the space environment (like radiation levels or microgravity conditions) with any biological dataset can help correlate external conditions with biological responses. The task of organizing multimodal datasets may face the following challenges: 1. **Data Integration**: Combining data from different sources and modalities can be challenging due to differences in scale, resolution, and format. 2. **Interpretability**: While multi-modal data can provide richer insights, it can also make interpretations complex. 3. **Computational Needs**: Integrating and analyzing multi-modal data often requires robust computational resources and specialized algorithms. However, the potential insights gained from such combinations, especially in understanding the complex biological responses to space conditions, can be invaluable. Leveraging transfer learning with models pretrained on diverse biomedical datasets and refined on space biology datasets can significantly boost the knowledge derived from these multi-modal combinations. --- ## 🌐 Promising Transfer Learning Model Architectures for Space Biology The deep learning domain has birthed numerous architectures tailor-made for transfer learning. These models, having trained on expansive datasets, excel at grasping general features, which can be specialized for niche tasks, such as those in space biology. Here's a selection of architectures ripe for exploration in this challenge: ### 1. **Convolutional Neural Networks (CNNs)**: Primarily efficient for image-centric data. - **VGG (e.g., VGG16, VGG19)**: Crafted by the Visual Geometry Group, it's a staple for image recognition. - **ResNet**: Features skip connections, countering the vanishing gradient dilemma in deep structures. - **Inception (or GoogLeNet)**: Employs varied convolution sizes for multi-scale detail capture. - **DenseNet**: Innovatively links each layer to every subsequent one in a feed-forward manner. ### 2. **Transformers**: Originally for NLP, but have branched out to other areas like imagery. - **BERT**: Tailored for NLP, it's versatile for text-oriented tasks. - **ViT (Vision Transformer)**: Modifies the transformer design for visual tasks. ### 3. **Recurrent Neural Networks (RNNs)**: Best suited for sequences such as time-series or biological sequences. - **LSTM**: Counters the standard RNN's vanishing gradient issue. - **GRU**: A streamlined LSTM variant. ### 4. **Autoencoders**: For unsupervised learning, adept at feature extraction from unlabeled content. - **Variational Autoencoders (VAEs)**: Introduces a probabilistic layer to autoencoders, frequently in generative scenarios. ### 5. **Generative Adversarial Networks (GANs)**: Ideal for dataset augmentation, synthesizing data resembling the original distribution. ### 6. **U-Net**: Conceived for biomedical image segmentation, amalgamating a context-capturing contractive route with a precision-centric expanding one. ### 7. **Capsule Networks**: Navigates the spatial hierarchy between simple and intricate objects in visuals, potentially invaluable for intricate biological imaging. ### 8. **EfficientNet**: Balances network breadth, depth, and clarity using fixed scaling coefficients, creating potentially smaller yet more precise models. ### 9. **BioBERT**: A BERT variant pre-trained on biomedical datasets, apt for biology-centered tasks. ### 10. **AlphaFold**: By DeepMind, it revolutionizes protein structure prediction, a seminal biological conundrum. ### **Recommendations**: - For the unique aspects of space biology, initiating with biomedically proven architectures like U-Net could be fruitful. - LSTMs or GRUs, being RNN derivatives, could be promising for genomic or other sequential datasets. - GANs might be instrumental for data augmentation or crafting synthetic examples to enrich datasets. - For challenges surrounding protein structures or other molecular biology facets, models like AlphaFold are worthy contenders. --- ## 🧪 Demo: Predicting Viral Host based on Metagenomic Features In this repository,we also explore a demo using metagenomic features extracted from viral genomes to predict the virus host. Features include Genome size, GC%, and count of CDS. These serve as the independent variables to predict the viral host. An SVM (Support Vector Machine) model is used, achieving an accuracy rate of 86%. Dive deeper into the methods, data preprocessing, and results [here](https://huggingface.co/datasets/Alfaxad/Space-Biology-Model-Zoo/blob/main/viral_host_demo/predict-viral-host-based-on-meta-genomic-features.ipynb).
Alfaxad/BioGalacticModels-Zoo
[ "region:us" ]
2023-10-07T15:21:05+00:00
{}
2023-10-08T13:07:22+00:00
[]
[]
TAGS #region-us
!License Badge # BioGalacticModels Zoo ## Overview ### ️ Space Biology Datasets And Models Hub The frontier of space biology research is vast, with uncharted territories that hide the secrets of life beyond our blue planet. With the increasing need for accurate and reliable methods to understand and decode the effects of space on biology, machine learning, particularly transfer learning, emerges as a promising approach. This repository serves as a nexus between space biology and computational methodologies, aimed at harnessing the power of transfer learning for space biology applications. We present to you a comprehensive database of publicly available biomedical datasets and models that can be used to further space-biology research and discovery. ### Purpose and Scope This repository is designed to: 1. Centralize Resources: Provide a curated collection of GeneLab datasets tailored for space biology studies, ranging from whole genome sequencing to DNA methylation. 2. Promote Transfer Learning: Offer pre-trained models suitable for transfer learning. 3. Streamline Data Processing: ️ Offer code samples and scripts for efficient dataset management. 4. Facilitate Collaboration: Foster collaboration amongst researchers in the field. 5. Reference Architectures: ️ Navigate through transfer learning architectures with ease. ### Intended Audience This hub is for: - Space Biologists: Integrating computational methodologies. - Data Scientists & Machine Learning Enthusiasts: Tackling challenges in space biology. - Students & Educators: Accessing resources for computational space biology. ### ️ Contributing and Feedback We believe in community-driven science. Your contributions are warmly welcomed! By joining hands, we can venture further into the mysteries of space biology. --- Join us in this interstellar journey of melding computation and space biology, steering the future of life in space. ## Table of Contents - BioGalactic Models - Datasets - Insights On BioGalacticModels Zoo Usage & Exploration - Transfer Learning Model Architectures for Space Biology - Demo: Predicting Viral Host based on Metagenomic Features --- ## BioGalactic Models BioGalactic Models is a dedicated Hugging Face space containing a curated collection of Biology & Biochemistry Foundation Models. Significance to the BioGalactic Model Zoo: - Ready-to-use Models: These models are pre-trained, optimized for transfer learning tasks. - Diverse Applications: Focused on Biology & Biochemistry, catering to space biology. - Continuous Evolution: As space biology progresses, this space will evolve. Impacting Space Biology Exploration: The models provide insights driving our understanding of life in space conditions. These include: - Decoding genomic sequences. - Predicting protein structures and interactions. - Analyzing metabolic pathways in space. --- ## Datasets Dive into the curated datasets, specifically tailored for space biology studies. These datasets, coming directly from the vaults of NASA's GeneLab, cover a range of biological investigations relevant to space. ### Whole Genome Sequencing Datasets 1. Microbiome profiling of feces from mice flown on the RR-10 mission 2. Metagenome profiling of feces from mice flown on the RR-23 mission 3. Whole genome sequencing and assembly of Eukaryotic microbes isolated from ISS environmental surface, Kirovograd region soil, Chernobyl Nuclear Power Plant and Chernobyl Exclusion Zone 4. Draft Genome Sequences of novel Agrobacterium genomospecies 3 Associated from the International Space Station 5. Metagenomic analysis of feces from mice flown on the RR-6 mission 6. Insta-Deep's Multi-species genome dataset ### DNA Methylation Datasets 1. Changes in DNA Methylation in Arabidopsis thaliana Plants Exposed Over Multiple Generations to Gamma Radiation 2. Characterization of Epigenetic Regulation in an Extraterrestrial Environment: The Arabidopsis Spaceflight Methylome 3. Ionizing radiation induces transgenerational effects of DNA methylation in zebrafish 4. Methylome Analysis of Arabidopsis Seedlings Exposed to Microgravity For an exhaustive list of datasets and other resources, explore NASA's Open Science Data Repository (OSDR). ## Bulk Downloading GeneLab Datasets with genelab-utils ### Quick Usage Guide # GeneLab utils Some helper programs for NASA GeneLab, such as 'GL-download-GLDS-data' for downloading files from a specific OSD or GLDS ID, and 'GL-get-workflow' for downloading workflows used by GeneLab for processing datasets. ## Conda install The genelab-utils package should be installed with conda/mamba. If you are not familiar with conda, you can find an introduction here if wanted, and if you are not familiar with mamba, there is a super-short introduction on that same page here if wanted – it's definitely worth using mamba if you use conda at all :+1: All programs are prefixed with 'GL-' and have a help menu accessible with '-h'. Version info can be accessed with 'GL-version'. ## Some example pages - Programmatically downloading GLDS data - 'GL-download-GLDS-data' - Downloading GeneLab workflows - 'GL-get-workflow' --- ## Insights On BioGalacticModels Zoo Usage & Exploration ### 1. Preprocessing For transfer learning these biomedical datasets may require various preprocessing steps depending on their source and format: - Data Cleaning: Removing noise and inconsistencies. - Normalization: Scaling features to a standard range. - Data Augmentation: Especially for image datasets, augmenting data can help improve model robustness. - Feature Selection/Extraction: Especially in genomics, where dimensionality can be very high. - Handling Imbalances: In some datasets, certain classes may be underrepresented. - Format Conversion: Datasets might need to be converted to formats compatible with machine learning frameworks. ### 3. Potential Multimodal Data Combinations for Space Biology Knowledge Gain Combining different types of datasets, like genomic, proteomic, and transcriptomic data, can provide a holistic view of biological systems. Additionally, integrating imaging data with molecular data can enhance our understanding of spatial-temporal patterns. Multi-modal datasets can help discover patterns or signals that might not be evident when analyzing data types in isolation. #### a. Genomic & Transcriptomic Data: - Why: While genomic data (like Whole Genome Sequencing) provides the blueprint of life, transcriptomic data offers insights into gene expression under specific conditions. Combining both can help in understanding the genetic basis of responses to space environments and how genes are expressed differently in space. #### b. Proteomic & Metabolomic Data: - Why: Proteomic data tells us about the proteins produced, while metabolomic data provides information on the small molecules in an organism. Together, they can offer insights into the functional state of cells in space, revealing which proteins are active and what metabolic pathways they're influencing. #### c. Transcriptomic & Metabolomic Data: - Why: This combination can correlate gene expression with metabolic changes. It can be particularly insightful to understand how gene expression changes influence metabolic responses in space conditions. #### d. Genomic & Phenotypic Data: - Why: Connecting the genetic makeup with observable traits (phenotypes) can help in predicting how specific genetic variations might influence an organism's ability to thrive in space. #### e. Imaging & Transcriptomic Data: - Why: While transcriptomic data reveals gene expression, imaging (like MRI or microscopy) can show structural or functional changes in tissues or cells. Combined, they can link gene expression patterns with visual manifestations. #### f. Epigenomic & Transcriptomic Data: - Why: Epigenomic data, like DNA Methylation, reveals changes in gene activity not caused by DNA sequence changes. By combining it with transcriptomic data, one can understand how space conditions might epigenetically influence gene expression. #### g. Genomic & Proteomic Data: - Why: This combination can be used to understand the translation of genes to proteins under space conditions, offering insights into post-transcriptional modifications in space. #### h. Environmental Data & Any Biological Data: - Why: Combining data on the space environment (like radiation levels or microgravity conditions) with any biological dataset can help correlate external conditions with biological responses. The task of organizing multimodal datasets may face the following challenges: 1. Data Integration: Combining data from different sources and modalities can be challenging due to differences in scale, resolution, and format. 2. Interpretability: While multi-modal data can provide richer insights, it can also make interpretations complex. 3. Computational Needs: Integrating and analyzing multi-modal data often requires robust computational resources and specialized algorithms. However, the potential insights gained from such combinations, especially in understanding the complex biological responses to space conditions, can be invaluable. Leveraging transfer learning with models pretrained on diverse biomedical datasets and refined on space biology datasets can significantly boost the knowledge derived from these multi-modal combinations. --- ## Promising Transfer Learning Model Architectures for Space Biology The deep learning domain has birthed numerous architectures tailor-made for transfer learning. These models, having trained on expansive datasets, excel at grasping general features, which can be specialized for niche tasks, such as those in space biology. Here's a selection of architectures ripe for exploration in this challenge: ### 1. Convolutional Neural Networks (CNNs): Primarily efficient for image-centric data. - VGG (e.g., VGG16, VGG19): Crafted by the Visual Geometry Group, it's a staple for image recognition. - ResNet: Features skip connections, countering the vanishing gradient dilemma in deep structures. - Inception (or GoogLeNet): Employs varied convolution sizes for multi-scale detail capture. - DenseNet: Innovatively links each layer to every subsequent one in a feed-forward manner. ### 2. Transformers: Originally for NLP, but have branched out to other areas like imagery. - BERT: Tailored for NLP, it's versatile for text-oriented tasks. - ViT (Vision Transformer): Modifies the transformer design for visual tasks. ### 3. Recurrent Neural Networks (RNNs): Best suited for sequences such as time-series or biological sequences. - LSTM: Counters the standard RNN's vanishing gradient issue. - GRU: A streamlined LSTM variant. ### 4. Autoencoders: For unsupervised learning, adept at feature extraction from unlabeled content. - Variational Autoencoders (VAEs): Introduces a probabilistic layer to autoencoders, frequently in generative scenarios. ### 5. Generative Adversarial Networks (GANs): Ideal for dataset augmentation, synthesizing data resembling the original distribution. ### 6. U-Net: Conceived for biomedical image segmentation, amalgamating a context-capturing contractive route with a precision-centric expanding one. ### 7. Capsule Networks: Navigates the spatial hierarchy between simple and intricate objects in visuals, potentially invaluable for intricate biological imaging. ### 8. EfficientNet: Balances network breadth, depth, and clarity using fixed scaling coefficients, creating potentially smaller yet more precise models. ### 9. BioBERT: A BERT variant pre-trained on biomedical datasets, apt for biology-centered tasks. ### 10. AlphaFold: By DeepMind, it revolutionizes protein structure prediction, a seminal biological conundrum. ### Recommendations: - For the unique aspects of space biology, initiating with biomedically proven architectures like U-Net could be fruitful. - LSTMs or GRUs, being RNN derivatives, could be promising for genomic or other sequential datasets. - GANs might be instrumental for data augmentation or crafting synthetic examples to enrich datasets. - For challenges surrounding protein structures or other molecular biology facets, models like AlphaFold are worthy contenders. --- ## Demo: Predicting Viral Host based on Metagenomic Features In this repository,we also explore a demo using metagenomic features extracted from viral genomes to predict the virus host. Features include Genome size, GC%, and count of CDS. These serve as the independent variables to predict the viral host. An SVM (Support Vector Machine) model is used, achieving an accuracy rate of 86%. Dive deeper into the methods, data preprocessing, and results here.
[ "# BioGalacticModels Zoo", "## Overview", "### ️ Space Biology Datasets And Models Hub\n\nThe frontier of space biology research is vast, with uncharted territories that hide the secrets of life beyond our blue planet. With the increasing need for accurate and reliable methods to understand and decode the effects of space on biology, machine learning, particularly transfer learning, emerges as a promising approach. This repository serves as a nexus between space biology and computational methodologies, aimed at harnessing the power of transfer learning for space biology applications. \n\nWe present to you a comprehensive database of publicly available biomedical datasets and models that can be used to further space-biology research and discovery.", "### Purpose and Scope\n\nThis repository is designed to:\n\n1. Centralize Resources: Provide a curated collection of GeneLab datasets tailored for space biology studies, ranging from whole genome sequencing to DNA methylation.\n2. Promote Transfer Learning: Offer pre-trained models suitable for transfer learning.\n3. Streamline Data Processing: ️ Offer code samples and scripts for efficient dataset management.\n4. Facilitate Collaboration: Foster collaboration amongst researchers in the field.\n5. Reference Architectures: ️ Navigate through transfer learning architectures with ease.", "### Intended Audience\n\nThis hub is for:\n\n- Space Biologists: Integrating computational methodologies.\n- Data Scientists & Machine Learning Enthusiasts: Tackling challenges in space biology.\n- Students & Educators: Accessing resources for computational space biology.", "### ️ Contributing and Feedback\n\nWe believe in community-driven science. Your contributions are warmly welcomed! By joining hands, we can venture further into the mysteries of space biology.\n\n---\n\n Join us in this interstellar journey of melding computation and space biology, steering the future of life in space.", "## Table of Contents\n\n- BioGalactic Models\n- Datasets\n- Insights On BioGalacticModels Zoo Usage & Exploration\n- Transfer Learning Model Architectures for Space Biology\n- Demo: Predicting Viral Host based on Metagenomic Features\n\n---", "## BioGalactic Models\n\nBioGalactic Models is a dedicated Hugging Face space containing a curated collection of Biology & Biochemistry Foundation Models. \n\nSignificance to the BioGalactic Model Zoo:\n- Ready-to-use Models: These models are pre-trained, optimized for transfer learning tasks.\n- Diverse Applications: Focused on Biology & Biochemistry, catering to space biology.\n- Continuous Evolution: As space biology progresses, this space will evolve.\n\nImpacting Space Biology Exploration:\nThe models provide insights driving our understanding of life in space conditions. These include:\n- Decoding genomic sequences.\n- Predicting protein structures and interactions.\n- Analyzing metabolic pathways in space.\n\n---", "## Datasets\n\nDive into the curated datasets, specifically tailored for space biology studies. These datasets, coming directly from the vaults of NASA's GeneLab, cover a range of biological investigations relevant to space.", "### Whole Genome Sequencing Datasets\n\n1. Microbiome profiling of feces from mice flown on the RR-10 mission\n2. Metagenome profiling of feces from mice flown on the RR-23 mission\n3. Whole genome sequencing and assembly of Eukaryotic microbes isolated from ISS environmental surface, Kirovograd region soil, Chernobyl Nuclear Power Plant and Chernobyl Exclusion Zone\n4. Draft Genome Sequences of novel Agrobacterium genomospecies 3 Associated from the International Space Station\n5. Metagenomic analysis of feces from mice flown on the RR-6 mission\n6. Insta-Deep's Multi-species genome dataset", "### DNA Methylation Datasets\n\n1. Changes in DNA Methylation in Arabidopsis thaliana Plants Exposed Over Multiple Generations to Gamma Radiation\n2. Characterization of Epigenetic Regulation in an Extraterrestrial Environment: The Arabidopsis Spaceflight Methylome\n3. Ionizing radiation induces transgenerational effects of DNA methylation in zebrafish\n4. Methylome Analysis of Arabidopsis Seedlings Exposed to Microgravity\n\nFor an exhaustive list of datasets and other resources, explore NASA's Open Science Data Repository (OSDR).", "## Bulk Downloading GeneLab Datasets with genelab-utils", "### Quick Usage Guide", "# GeneLab utils\n\nSome helper programs for NASA GeneLab, such as 'GL-download-GLDS-data' for downloading files from a specific OSD or GLDS ID, and 'GL-get-workflow' for downloading workflows used by GeneLab for processing datasets.", "## Conda install\nThe genelab-utils package should be installed with conda/mamba. If you are not familiar with conda, you can find an introduction here if wanted, and if you are not familiar with mamba, there is a super-short introduction on that same page here if wanted – it's definitely worth using mamba if you use conda at all :+1: \n\n\n\n\nAll programs are prefixed with 'GL-' and have a help menu accessible with '-h'. Version info can be accessed with 'GL-version'.", "## Some example pages\n- Programmatically downloading GLDS data\n - 'GL-download-GLDS-data' \n- Downloading GeneLab workflows\n - 'GL-get-workflow' \n\n\n---", "## Insights On BioGalacticModels Zoo Usage & Exploration", "### 1. Preprocessing\nFor transfer learning these biomedical datasets may require various preprocessing steps depending on their source and format:\n- Data Cleaning: Removing noise and inconsistencies.\n- Normalization: Scaling features to a standard range.\n- Data Augmentation: Especially for image datasets, augmenting data can help improve model robustness.\n- Feature Selection/Extraction: Especially in genomics, where dimensionality can be very high.\n- Handling Imbalances: In some datasets, certain classes may be underrepresented.\n- Format Conversion: Datasets might need to be converted to formats compatible with machine learning frameworks.", "### 3. Potential Multimodal Data Combinations for Space Biology Knowledge Gain\nCombining different types of datasets, like genomic, proteomic, and transcriptomic data, can provide a holistic view of biological systems.\nAdditionally, integrating imaging data with molecular data can enhance our understanding of spatial-temporal patterns. \nMulti-modal datasets can help discover patterns or signals that might not be evident when analyzing data types in isolation.", "#### a. Genomic & Transcriptomic Data:\n- Why: While genomic data (like Whole Genome Sequencing) provides the blueprint of life, transcriptomic data offers insights into gene expression under specific conditions. Combining both can help in understanding the genetic basis of responses to space environments and how genes are expressed differently in space.", "#### b. Proteomic & Metabolomic Data:\n- Why: Proteomic data tells us about the proteins produced, while metabolomic data provides information on the small molecules in an organism. Together, they can offer insights into the functional state of cells in space, revealing which proteins are active and what metabolic pathways they're influencing.", "#### c. Transcriptomic & Metabolomic Data:\n- Why: This combination can correlate gene expression with metabolic changes. It can be particularly insightful to understand how gene expression changes influence metabolic responses in space conditions.", "#### d. Genomic & Phenotypic Data:\n- Why: Connecting the genetic makeup with observable traits (phenotypes) can help in predicting how specific genetic variations might influence an organism's ability to thrive in space.", "#### e. Imaging & Transcriptomic Data:\n- Why: While transcriptomic data reveals gene expression, imaging (like MRI or microscopy) can show structural or functional changes in tissues or cells. Combined, they can link gene expression patterns with visual manifestations.", "#### f. Epigenomic & Transcriptomic Data:\n- Why: Epigenomic data, like DNA Methylation, reveals changes in gene activity not caused by DNA sequence changes. By combining it with transcriptomic data, one can understand how space conditions might epigenetically influence gene expression.", "#### g. Genomic & Proteomic Data:\n- Why: This combination can be used to understand the translation of genes to proteins under space conditions, offering insights into post-transcriptional modifications in space.", "#### h. Environmental Data & Any Biological Data:\n- Why: Combining data on the space environment (like radiation levels or microgravity conditions) with any biological dataset can help correlate external conditions with biological responses.\n\nThe task of organizing multimodal datasets may face the following challenges:\n\n1. Data Integration: Combining data from different sources and modalities can be challenging due to differences in scale, resolution, and format.\n2. Interpretability: While multi-modal data can provide richer insights, it can also make interpretations complex.\n3. Computational Needs: Integrating and analyzing multi-modal data often requires robust computational resources and specialized algorithms.\n\nHowever, the potential insights gained from such combinations, especially in understanding the complex biological responses to space conditions, can be invaluable.\nLeveraging transfer learning with models pretrained on diverse biomedical datasets and refined on space biology datasets can significantly boost the knowledge derived from these multi-modal combinations.\n\n\n\n---", "## Promising Transfer Learning Model Architectures for Space Biology\n\nThe deep learning domain has birthed numerous architectures tailor-made for transfer learning. These models, having trained on expansive datasets, excel at grasping general features, which can be specialized for niche tasks, such as those in space biology. Here's a selection of architectures ripe for exploration in this challenge:", "### 1. Convolutional Neural Networks (CNNs): \nPrimarily efficient for image-centric data.\n - VGG (e.g., VGG16, VGG19): Crafted by the Visual Geometry Group, it's a staple for image recognition.\n - ResNet: Features skip connections, countering the vanishing gradient dilemma in deep structures.\n - Inception (or GoogLeNet): Employs varied convolution sizes for multi-scale detail capture.\n - DenseNet: Innovatively links each layer to every subsequent one in a feed-forward manner.", "### 2. Transformers: \nOriginally for NLP, but have branched out to other areas like imagery.\n - BERT: Tailored for NLP, it's versatile for text-oriented tasks.\n - ViT (Vision Transformer): Modifies the transformer design for visual tasks.", "### 3. Recurrent Neural Networks (RNNs): \nBest suited for sequences such as time-series or biological sequences.\n - LSTM: Counters the standard RNN's vanishing gradient issue.\n - GRU: A streamlined LSTM variant.", "### 4. Autoencoders: \nFor unsupervised learning, adept at feature extraction from unlabeled content.\n - Variational Autoencoders (VAEs): Introduces a probabilistic layer to autoencoders, frequently in generative scenarios.", "### 5. Generative Adversarial Networks (GANs): \nIdeal for dataset augmentation, synthesizing data resembling the original distribution.", "### 6. U-Net: \nConceived for biomedical image segmentation, amalgamating a context-capturing contractive route with a precision-centric expanding one.", "### 7. Capsule Networks: \nNavigates the spatial hierarchy between simple and intricate objects in visuals, potentially invaluable for intricate biological imaging.", "### 8. EfficientNet: \nBalances network breadth, depth, and clarity using fixed scaling coefficients, creating potentially smaller yet more precise models.", "### 9. BioBERT: \nA BERT variant pre-trained on biomedical datasets, apt for biology-centered tasks.", "### 10. AlphaFold: \nBy DeepMind, it revolutionizes protein structure prediction, a seminal biological conundrum.", "### Recommendations:\n- For the unique aspects of space biology, initiating with biomedically proven architectures like U-Net could be fruitful.\n- LSTMs or GRUs, being RNN derivatives, could be promising for genomic or other sequential datasets.\n- GANs might be instrumental for data augmentation or crafting synthetic examples to enrich datasets.\n- For challenges surrounding protein structures or other molecular biology facets, models like AlphaFold are worthy contenders.\n\n\n---", "## Demo: Predicting Viral Host based on Metagenomic Features\n\nIn this repository,we also explore a demo using metagenomic features extracted from viral genomes to predict the virus host. Features include Genome size, GC%, and count of CDS. These serve as the independent variables to predict the viral host.\n\nAn SVM (Support Vector Machine) model is used, achieving an accuracy rate of 86%. Dive deeper into the methods, data preprocessing, and results here." ]
[ "TAGS\n#region-us \n", "# BioGalacticModels Zoo", "## Overview", "### ️ Space Biology Datasets And Models Hub\n\nThe frontier of space biology research is vast, with uncharted territories that hide the secrets of life beyond our blue planet. With the increasing need for accurate and reliable methods to understand and decode the effects of space on biology, machine learning, particularly transfer learning, emerges as a promising approach. This repository serves as a nexus between space biology and computational methodologies, aimed at harnessing the power of transfer learning for space biology applications. \n\nWe present to you a comprehensive database of publicly available biomedical datasets and models that can be used to further space-biology research and discovery.", "### Purpose and Scope\n\nThis repository is designed to:\n\n1. Centralize Resources: Provide a curated collection of GeneLab datasets tailored for space biology studies, ranging from whole genome sequencing to DNA methylation.\n2. Promote Transfer Learning: Offer pre-trained models suitable for transfer learning.\n3. Streamline Data Processing: ️ Offer code samples and scripts for efficient dataset management.\n4. Facilitate Collaboration: Foster collaboration amongst researchers in the field.\n5. Reference Architectures: ️ Navigate through transfer learning architectures with ease.", "### Intended Audience\n\nThis hub is for:\n\n- Space Biologists: Integrating computational methodologies.\n- Data Scientists & Machine Learning Enthusiasts: Tackling challenges in space biology.\n- Students & Educators: Accessing resources for computational space biology.", "### ️ Contributing and Feedback\n\nWe believe in community-driven science. Your contributions are warmly welcomed! By joining hands, we can venture further into the mysteries of space biology.\n\n---\n\n Join us in this interstellar journey of melding computation and space biology, steering the future of life in space.", "## Table of Contents\n\n- BioGalactic Models\n- Datasets\n- Insights On BioGalacticModels Zoo Usage & Exploration\n- Transfer Learning Model Architectures for Space Biology\n- Demo: Predicting Viral Host based on Metagenomic Features\n\n---", "## BioGalactic Models\n\nBioGalactic Models is a dedicated Hugging Face space containing a curated collection of Biology & Biochemistry Foundation Models. \n\nSignificance to the BioGalactic Model Zoo:\n- Ready-to-use Models: These models are pre-trained, optimized for transfer learning tasks.\n- Diverse Applications: Focused on Biology & Biochemistry, catering to space biology.\n- Continuous Evolution: As space biology progresses, this space will evolve.\n\nImpacting Space Biology Exploration:\nThe models provide insights driving our understanding of life in space conditions. These include:\n- Decoding genomic sequences.\n- Predicting protein structures and interactions.\n- Analyzing metabolic pathways in space.\n\n---", "## Datasets\n\nDive into the curated datasets, specifically tailored for space biology studies. These datasets, coming directly from the vaults of NASA's GeneLab, cover a range of biological investigations relevant to space.", "### Whole Genome Sequencing Datasets\n\n1. Microbiome profiling of feces from mice flown on the RR-10 mission\n2. Metagenome profiling of feces from mice flown on the RR-23 mission\n3. Whole genome sequencing and assembly of Eukaryotic microbes isolated from ISS environmental surface, Kirovograd region soil, Chernobyl Nuclear Power Plant and Chernobyl Exclusion Zone\n4. Draft Genome Sequences of novel Agrobacterium genomospecies 3 Associated from the International Space Station\n5. Metagenomic analysis of feces from mice flown on the RR-6 mission\n6. Insta-Deep's Multi-species genome dataset", "### DNA Methylation Datasets\n\n1. Changes in DNA Methylation in Arabidopsis thaliana Plants Exposed Over Multiple Generations to Gamma Radiation\n2. Characterization of Epigenetic Regulation in an Extraterrestrial Environment: The Arabidopsis Spaceflight Methylome\n3. Ionizing radiation induces transgenerational effects of DNA methylation in zebrafish\n4. Methylome Analysis of Arabidopsis Seedlings Exposed to Microgravity\n\nFor an exhaustive list of datasets and other resources, explore NASA's Open Science Data Repository (OSDR).", "## Bulk Downloading GeneLab Datasets with genelab-utils", "### Quick Usage Guide", "# GeneLab utils\n\nSome helper programs for NASA GeneLab, such as 'GL-download-GLDS-data' for downloading files from a specific OSD or GLDS ID, and 'GL-get-workflow' for downloading workflows used by GeneLab for processing datasets.", "## Conda install\nThe genelab-utils package should be installed with conda/mamba. If you are not familiar with conda, you can find an introduction here if wanted, and if you are not familiar with mamba, there is a super-short introduction on that same page here if wanted – it's definitely worth using mamba if you use conda at all :+1: \n\n\n\n\nAll programs are prefixed with 'GL-' and have a help menu accessible with '-h'. Version info can be accessed with 'GL-version'.", "## Some example pages\n- Programmatically downloading GLDS data\n - 'GL-download-GLDS-data' \n- Downloading GeneLab workflows\n - 'GL-get-workflow' \n\n\n---", "## Insights On BioGalacticModels Zoo Usage & Exploration", "### 1. Preprocessing\nFor transfer learning these biomedical datasets may require various preprocessing steps depending on their source and format:\n- Data Cleaning: Removing noise and inconsistencies.\n- Normalization: Scaling features to a standard range.\n- Data Augmentation: Especially for image datasets, augmenting data can help improve model robustness.\n- Feature Selection/Extraction: Especially in genomics, where dimensionality can be very high.\n- Handling Imbalances: In some datasets, certain classes may be underrepresented.\n- Format Conversion: Datasets might need to be converted to formats compatible with machine learning frameworks.", "### 3. Potential Multimodal Data Combinations for Space Biology Knowledge Gain\nCombining different types of datasets, like genomic, proteomic, and transcriptomic data, can provide a holistic view of biological systems.\nAdditionally, integrating imaging data with molecular data can enhance our understanding of spatial-temporal patterns. \nMulti-modal datasets can help discover patterns or signals that might not be evident when analyzing data types in isolation.", "#### a. Genomic & Transcriptomic Data:\n- Why: While genomic data (like Whole Genome Sequencing) provides the blueprint of life, transcriptomic data offers insights into gene expression under specific conditions. Combining both can help in understanding the genetic basis of responses to space environments and how genes are expressed differently in space.", "#### b. Proteomic & Metabolomic Data:\n- Why: Proteomic data tells us about the proteins produced, while metabolomic data provides information on the small molecules in an organism. Together, they can offer insights into the functional state of cells in space, revealing which proteins are active and what metabolic pathways they're influencing.", "#### c. Transcriptomic & Metabolomic Data:\n- Why: This combination can correlate gene expression with metabolic changes. It can be particularly insightful to understand how gene expression changes influence metabolic responses in space conditions.", "#### d. Genomic & Phenotypic Data:\n- Why: Connecting the genetic makeup with observable traits (phenotypes) can help in predicting how specific genetic variations might influence an organism's ability to thrive in space.", "#### e. Imaging & Transcriptomic Data:\n- Why: While transcriptomic data reveals gene expression, imaging (like MRI or microscopy) can show structural or functional changes in tissues or cells. Combined, they can link gene expression patterns with visual manifestations.", "#### f. Epigenomic & Transcriptomic Data:\n- Why: Epigenomic data, like DNA Methylation, reveals changes in gene activity not caused by DNA sequence changes. By combining it with transcriptomic data, one can understand how space conditions might epigenetically influence gene expression.", "#### g. Genomic & Proteomic Data:\n- Why: This combination can be used to understand the translation of genes to proteins under space conditions, offering insights into post-transcriptional modifications in space.", "#### h. Environmental Data & Any Biological Data:\n- Why: Combining data on the space environment (like radiation levels or microgravity conditions) with any biological dataset can help correlate external conditions with biological responses.\n\nThe task of organizing multimodal datasets may face the following challenges:\n\n1. Data Integration: Combining data from different sources and modalities can be challenging due to differences in scale, resolution, and format.\n2. Interpretability: While multi-modal data can provide richer insights, it can also make interpretations complex.\n3. Computational Needs: Integrating and analyzing multi-modal data often requires robust computational resources and specialized algorithms.\n\nHowever, the potential insights gained from such combinations, especially in understanding the complex biological responses to space conditions, can be invaluable.\nLeveraging transfer learning with models pretrained on diverse biomedical datasets and refined on space biology datasets can significantly boost the knowledge derived from these multi-modal combinations.\n\n\n\n---", "## Promising Transfer Learning Model Architectures for Space Biology\n\nThe deep learning domain has birthed numerous architectures tailor-made for transfer learning. These models, having trained on expansive datasets, excel at grasping general features, which can be specialized for niche tasks, such as those in space biology. Here's a selection of architectures ripe for exploration in this challenge:", "### 1. Convolutional Neural Networks (CNNs): \nPrimarily efficient for image-centric data.\n - VGG (e.g., VGG16, VGG19): Crafted by the Visual Geometry Group, it's a staple for image recognition.\n - ResNet: Features skip connections, countering the vanishing gradient dilemma in deep structures.\n - Inception (or GoogLeNet): Employs varied convolution sizes for multi-scale detail capture.\n - DenseNet: Innovatively links each layer to every subsequent one in a feed-forward manner.", "### 2. Transformers: \nOriginally for NLP, but have branched out to other areas like imagery.\n - BERT: Tailored for NLP, it's versatile for text-oriented tasks.\n - ViT (Vision Transformer): Modifies the transformer design for visual tasks.", "### 3. Recurrent Neural Networks (RNNs): \nBest suited for sequences such as time-series or biological sequences.\n - LSTM: Counters the standard RNN's vanishing gradient issue.\n - GRU: A streamlined LSTM variant.", "### 4. Autoencoders: \nFor unsupervised learning, adept at feature extraction from unlabeled content.\n - Variational Autoencoders (VAEs): Introduces a probabilistic layer to autoencoders, frequently in generative scenarios.", "### 5. Generative Adversarial Networks (GANs): \nIdeal for dataset augmentation, synthesizing data resembling the original distribution.", "### 6. U-Net: \nConceived for biomedical image segmentation, amalgamating a context-capturing contractive route with a precision-centric expanding one.", "### 7. Capsule Networks: \nNavigates the spatial hierarchy between simple and intricate objects in visuals, potentially invaluable for intricate biological imaging.", "### 8. EfficientNet: \nBalances network breadth, depth, and clarity using fixed scaling coefficients, creating potentially smaller yet more precise models.", "### 9. BioBERT: \nA BERT variant pre-trained on biomedical datasets, apt for biology-centered tasks.", "### 10. AlphaFold: \nBy DeepMind, it revolutionizes protein structure prediction, a seminal biological conundrum.", "### Recommendations:\n- For the unique aspects of space biology, initiating with biomedically proven architectures like U-Net could be fruitful.\n- LSTMs or GRUs, being RNN derivatives, could be promising for genomic or other sequential datasets.\n- GANs might be instrumental for data augmentation or crafting synthetic examples to enrich datasets.\n- For challenges surrounding protein structures or other molecular biology facets, models like AlphaFold are worthy contenders.\n\n\n---", "## Demo: Predicting Viral Host based on Metagenomic Features\n\nIn this repository,we also explore a demo using metagenomic features extracted from viral genomes to predict the virus host. Features include Genome size, GC%, and count of CDS. These serve as the independent variables to predict the viral host.\n\nAn SVM (Support Vector Machine) model is used, achieving an accuracy rate of 86%. Dive deeper into the methods, data preprocessing, and results here." ]
[ 6, 8, 3, 153, 133, 64, 72, 58, 175, 55, 161, 134, 16, 6, 66, 120, 42, 16, 152, 110, 81, 86, 50, 55, 67, 70, 49, 228, 90, 138, 69, 67, 53, 32, 40, 44, 38, 34, 29, 120, 116 ]
[ "passage: TAGS\n#region-us \n# BioGalacticModels Zoo## Overview### ️ Space Biology Datasets And Models Hub\n\nThe frontier of space biology research is vast, with uncharted territories that hide the secrets of life beyond our blue planet. With the increasing need for accurate and reliable methods to understand and decode the effects of space on biology, machine learning, particularly transfer learning, emerges as a promising approach. This repository serves as a nexus between space biology and computational methodologies, aimed at harnessing the power of transfer learning for space biology applications. \n\nWe present to you a comprehensive database of publicly available biomedical datasets and models that can be used to further space-biology research and discovery.### Purpose and Scope\n\nThis repository is designed to:\n\n1. Centralize Resources: Provide a curated collection of GeneLab datasets tailored for space biology studies, ranging from whole genome sequencing to DNA methylation.\n2. Promote Transfer Learning: Offer pre-trained models suitable for transfer learning.\n3. Streamline Data Processing: ️ Offer code samples and scripts for efficient dataset management.\n4. Facilitate Collaboration: Foster collaboration amongst researchers in the field.\n5. Reference Architectures: ️ Navigate through transfer learning architectures with ease.### Intended Audience\n\nThis hub is for:\n\n- Space Biologists: Integrating computational methodologies.\n- Data Scientists & Machine Learning Enthusiasts: Tackling challenges in space biology.\n- Students & Educators: Accessing resources for computational space biology.### ️ Contributing and Feedback\n\nWe believe in community-driven science. Your contributions are warmly welcomed! By joining hands, we can venture further into the mysteries of space biology.\n\n---\n\n Join us in this interstellar journey of melding computation and space biology, steering the future of life in space.## Table of Contents\n\n- BioGalactic Models\n- Datasets\n- Insights On BioGalacticModels Zoo Usage & Exploration\n- Transfer Learning Model Architectures for Space Biology\n- Demo: Predicting Viral Host based on Metagenomic Features\n\n---", "passage: ## BioGalactic Models\n\nBioGalactic Models is a dedicated Hugging Face space containing a curated collection of Biology & Biochemistry Foundation Models. \n\nSignificance to the BioGalactic Model Zoo:\n- Ready-to-use Models: These models are pre-trained, optimized for transfer learning tasks.\n- Diverse Applications: Focused on Biology & Biochemistry, catering to space biology.\n- Continuous Evolution: As space biology progresses, this space will evolve.\n\nImpacting Space Biology Exploration:\nThe models provide insights driving our understanding of life in space conditions. These include:\n- Decoding genomic sequences.\n- Predicting protein structures and interactions.\n- Analyzing metabolic pathways in space.\n\n---## Datasets\n\nDive into the curated datasets, specifically tailored for space biology studies. These datasets, coming directly from the vaults of NASA's GeneLab, cover a range of biological investigations relevant to space.### Whole Genome Sequencing Datasets\n\n1. Microbiome profiling of feces from mice flown on the RR-10 mission\n2. Metagenome profiling of feces from mice flown on the RR-23 mission\n3. Whole genome sequencing and assembly of Eukaryotic microbes isolated from ISS environmental surface, Kirovograd region soil, Chernobyl Nuclear Power Plant and Chernobyl Exclusion Zone\n4. Draft Genome Sequences of novel Agrobacterium genomospecies 3 Associated from the International Space Station\n5. Metagenomic analysis of feces from mice flown on the RR-6 mission\n6. Insta-Deep's Multi-species genome dataset### DNA Methylation Datasets\n\n1. Changes in DNA Methylation in Arabidopsis thaliana Plants Exposed Over Multiple Generations to Gamma Radiation\n2. Characterization of Epigenetic Regulation in an Extraterrestrial Environment: The Arabidopsis Spaceflight Methylome\n3. Ionizing radiation induces transgenerational effects of DNA methylation in zebrafish\n4. Methylome Analysis of Arabidopsis Seedlings Exposed to Microgravity\n\nFor an exhaustive list of datasets and other resources, explore NASA's Open Science Data Repository (OSDR).## Bulk Downloading GeneLab Datasets with genelab-utils### Quick Usage Guide# GeneLab utils\n\nSome helper programs for NASA GeneLab, such as 'GL-download-GLDS-data' for downloading files from a specific OSD or GLDS ID, and 'GL-get-workflow' for downloading workflows used by GeneLab for processing datasets.", "passage: ## Conda install\nThe genelab-utils package should be installed with conda/mamba. If you are not familiar with conda, you can find an introduction here if wanted, and if you are not familiar with mamba, there is a super-short introduction on that same page here if wanted – it's definitely worth using mamba if you use conda at all :+1: \n\n\n\n\nAll programs are prefixed with 'GL-' and have a help menu accessible with '-h'. Version info can be accessed with 'GL-version'.## Some example pages\n- Programmatically downloading GLDS data\n - 'GL-download-GLDS-data' \n- Downloading GeneLab workflows\n - 'GL-get-workflow' \n\n\n---## Insights On BioGalacticModels Zoo Usage & Exploration### 1. Preprocessing\nFor transfer learning these biomedical datasets may require various preprocessing steps depending on their source and format:\n- Data Cleaning: Removing noise and inconsistencies.\n- Normalization: Scaling features to a standard range.\n- Data Augmentation: Especially for image datasets, augmenting data can help improve model robustness.\n- Feature Selection/Extraction: Especially in genomics, where dimensionality can be very high.\n- Handling Imbalances: In some datasets, certain classes may be underrepresented.\n- Format Conversion: Datasets might need to be converted to formats compatible with machine learning frameworks.### 3. Potential Multimodal Data Combinations for Space Biology Knowledge Gain\nCombining different types of datasets, like genomic, proteomic, and transcriptomic data, can provide a holistic view of biological systems.\nAdditionally, integrating imaging data with molecular data can enhance our understanding of spatial-temporal patterns. \nMulti-modal datasets can help discover patterns or signals that might not be evident when analyzing data types in isolation.#### a. Genomic & Transcriptomic Data:\n- Why: While genomic data (like Whole Genome Sequencing) provides the blueprint of life, transcriptomic data offers insights into gene expression under specific conditions. Combining both can help in understanding the genetic basis of responses to space environments and how genes are expressed differently in space.#### b. Proteomic & Metabolomic Data:\n- Why: Proteomic data tells us about the proteins produced, while metabolomic data provides information on the small molecules in an organism. Together, they can offer insights into the functional state of cells in space, revealing which proteins are active and what metabolic pathways they're influencing.", "passage: #### c. Transcriptomic & Metabolomic Data:\n- Why: This combination can correlate gene expression with metabolic changes. It can be particularly insightful to understand how gene expression changes influence metabolic responses in space conditions.#### d. Genomic & Phenotypic Data:\n- Why: Connecting the genetic makeup with observable traits (phenotypes) can help in predicting how specific genetic variations might influence an organism's ability to thrive in space.#### e. Imaging & Transcriptomic Data:\n- Why: While transcriptomic data reveals gene expression, imaging (like MRI or microscopy) can show structural or functional changes in tissues or cells. Combined, they can link gene expression patterns with visual manifestations.#### f. Epigenomic & Transcriptomic Data:\n- Why: Epigenomic data, like DNA Methylation, reveals changes in gene activity not caused by DNA sequence changes. By combining it with transcriptomic data, one can understand how space conditions might epigenetically influence gene expression.#### g. Genomic & Proteomic Data:\n- Why: This combination can be used to understand the translation of genes to proteins under space conditions, offering insights into post-transcriptional modifications in space.#### h. Environmental Data & Any Biological Data:\n- Why: Combining data on the space environment (like radiation levels or microgravity conditions) with any biological dataset can help correlate external conditions with biological responses.\n\nThe task of organizing multimodal datasets may face the following challenges:\n\n1. Data Integration: Combining data from different sources and modalities can be challenging due to differences in scale, resolution, and format.\n2. Interpretability: While multi-modal data can provide richer insights, it can also make interpretations complex.\n3. Computational Needs: Integrating and analyzing multi-modal data often requires robust computational resources and specialized algorithms.\n\nHowever, the potential insights gained from such combinations, especially in understanding the complex biological responses to space conditions, can be invaluable.\nLeveraging transfer learning with models pretrained on diverse biomedical datasets and refined on space biology datasets can significantly boost the knowledge derived from these multi-modal combinations.\n\n\n\n---", "passage: ## Promising Transfer Learning Model Architectures for Space Biology\n\nThe deep learning domain has birthed numerous architectures tailor-made for transfer learning. These models, having trained on expansive datasets, excel at grasping general features, which can be specialized for niche tasks, such as those in space biology. Here's a selection of architectures ripe for exploration in this challenge:### 1. Convolutional Neural Networks (CNNs): \nPrimarily efficient for image-centric data.\n - VGG (e.g., VGG16, VGG19): Crafted by the Visual Geometry Group, it's a staple for image recognition.\n - ResNet: Features skip connections, countering the vanishing gradient dilemma in deep structures.\n - Inception (or GoogLeNet): Employs varied convolution sizes for multi-scale detail capture.\n - DenseNet: Innovatively links each layer to every subsequent one in a feed-forward manner.### 2. Transformers: \nOriginally for NLP, but have branched out to other areas like imagery.\n - BERT: Tailored for NLP, it's versatile for text-oriented tasks.\n - ViT (Vision Transformer): Modifies the transformer design for visual tasks.### 3. Recurrent Neural Networks (RNNs): \nBest suited for sequences such as time-series or biological sequences.\n - LSTM: Counters the standard RNN's vanishing gradient issue.\n - GRU: A streamlined LSTM variant.### 4. Autoencoders: \nFor unsupervised learning, adept at feature extraction from unlabeled content.\n - Variational Autoencoders (VAEs): Introduces a probabilistic layer to autoencoders, frequently in generative scenarios.### 5. Generative Adversarial Networks (GANs): \nIdeal for dataset augmentation, synthesizing data resembling the original distribution.### 6. U-Net: \nConceived for biomedical image segmentation, amalgamating a context-capturing contractive route with a precision-centric expanding one.### 7. Capsule Networks: \nNavigates the spatial hierarchy between simple and intricate objects in visuals, potentially invaluable for intricate biological imaging.### 8. EfficientNet: \nBalances network breadth, depth, and clarity using fixed scaling coefficients, creating potentially smaller yet more precise models." ]
64e7d209d29d04c39916a02f267b3bc16487f1ea
- This is a Japanese translation and reformatted version of ([OpenAssistant/oasst1](https://huggingface.co/datasets/OpenAssistant/oasst1)). - The original English dataset can be found here [OpenAssistant/oasst1](https://huggingface.co/datasets/OpenAssistant/oasst1). - And the dataset before reformatting can be found here [`kunishou/oasst1-89k-ja`](https://huggingface.co/datasets/kunishou/oasst1-89k-ja). - So, when you use this dataset, please also refer to and cite these datasets.
fujiki/oasst1-89k-ja-reformat-v1
[ "license:apache-2.0", "region:us" ]
2023-10-07T15:36:06+00:00
{"license": "apache-2.0", "dataset_info": {"features": [{"name": "dataset", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "instructions", "sequence": "string"}, {"name": "responses", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 58992730, "num_examples": 33919}], "download_size": 21655251, "dataset_size": 58992730}}
2023-10-18T07:59:55+00:00
[]
[]
TAGS #license-apache-2.0 #region-us
- This is a Japanese translation and reformatted version of (OpenAssistant/oasst1). - The original English dataset can be found here OpenAssistant/oasst1. - And the dataset before reformatting can be found here 'kunishou/oasst1-89k-ja'. - So, when you use this dataset, please also refer to and cite these datasets.
[]
[ "TAGS\n#license-apache-2.0 #region-us \n" ]
[ 14 ]
[ "passage: TAGS\n#license-apache-2.0 #region-us \n" ]
87952168218a00f930b7906739fb8bd7a2ca1f06
dataset_info: features: - name: input dtype: string - name: output dtype: string - name: table dtype: string # Dataset Card for "Llama-2-JKT48-FP" This dataset is intended to provide LLaMA 2 improved coding and instruction following capabilities, with a specific focus on JKT$* knowledges. The dataset is created for exercising training llama2.
famepram/llama-2-jk48-demo
[ "license:other", "region:us" ]
2023-10-07T15:45:09+00:00
{"license": "other", "license_name": "readme.md", "license_link": "LICENSE"}
2023-11-08T04:14:12+00:00
[]
[]
TAGS #license-other #region-us
dataset_info: features: - name: input dtype: string - name: output dtype: string - name: table dtype: string # Dataset Card for "Llama-2-JKT48-FP" This dataset is intended to provide LLaMA 2 improved coding and instruction following capabilities, with a specific focus on JKT$* knowledges. The dataset is created for exercising training llama2.
[ "# Dataset Card for \"Llama-2-JKT48-FP\"\n\nThis dataset is intended to provide LLaMA 2 improved coding and instruction following capabilities, with a specific focus on JKT$* knowledges.\n\nThe dataset is created for exercising training llama2." ]
[ "TAGS\n#license-other #region-us \n", "# Dataset Card for \"Llama-2-JKT48-FP\"\n\nThis dataset is intended to provide LLaMA 2 improved coding and instruction following capabilities, with a specific focus on JKT$* knowledges.\n\nThe dataset is created for exercising training llama2." ]
[ 11, 61 ]
[ "passage: TAGS\n#license-other #region-us \n# Dataset Card for \"Llama-2-JKT48-FP\"\n\nThis dataset is intended to provide LLaMA 2 improved coding and instruction following capabilities, with a specific focus on JKT$* knowledges.\n\nThe dataset is created for exercising training llama2." ]
b43b91bfdcc60e8edb14a59dca0d99a1b97ba2a7
# Dataset Card for "aesir-test69" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
towhid/aesir-test69
[ "region:us" ]
2023-10-07T15:46:11+00:00
{"dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 22114, "num_examples": 10}], "download_size": 28277, "dataset_size": 22114}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-07T17:20:02+00:00
[]
[]
TAGS #region-us
# Dataset Card for "aesir-test69" More Information needed
[ "# Dataset Card for \"aesir-test69\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"aesir-test69\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"aesir-test69\"\n\nMore Information needed" ]
b16d08b7da4340b6faddb651de6048fe0a910082
# Dataset Card for "test_dataset_20231007_171958" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tr416/test_dataset_20231007_171958
[ "region:us" ]
2023-10-07T16:19:58+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 762696.0, "num_examples": 297}, {"name": "test", "num_bytes": 7704.0, "num_examples": 3}], "download_size": 73618, "dataset_size": 770400.0}}
2023-10-07T16:20:00+00:00
[]
[]
TAGS #region-us
# Dataset Card for "test_dataset_20231007_171958" More Information needed
[ "# Dataset Card for \"test_dataset_20231007_171958\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"test_dataset_20231007_171958\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"test_dataset_20231007_171958\"\n\nMore Information needed" ]
54dda3ec145dad59795f4d9c2c362bf5d9f7c5a6
# Dataset Card for "test2_dataset_20231007_172035" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tr416/test2_dataset_20231007_172035
[ "region:us" ]
2023-10-07T16:20:35+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 762696.0, "num_examples": 297}, {"name": "test", "num_bytes": 7704.0, "num_examples": 3}], "download_size": 73851, "dataset_size": 770400.0}}
2023-10-07T16:20:36+00:00
[]
[]
TAGS #region-us
# Dataset Card for "test2_dataset_20231007_172035" More Information needed
[ "# Dataset Card for \"test2_dataset_20231007_172035\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"test2_dataset_20231007_172035\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"test2_dataset_20231007_172035\"\n\nMore Information needed" ]
bbccb10fac2b5aa51e260e95451eee95c6b9189b
# Dataset Card for "embeddings_from_distilbert_class_heaps_and_eval_part1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
johannes-garstenauer/embeddings_from_distilbert_class_heaps_and_eval_part1
[ "region:us" ]
2023-10-07T16:25:48+00:00
{"dataset_info": {"features": [{"name": "struct", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "pred", "dtype": "int64"}, {"name": "cls_layer_6", "sequence": "float32"}, {"name": "cls_layer_5", "sequence": "float32"}, {"name": "cls_layer_4", "sequence": "float32"}], "splits": [{"name": "train", "num_bytes": 1281395185, "num_examples": 134495}], "download_size": 1491856532, "dataset_size": 1281395185}}
2023-10-07T16:27:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for "embeddings_from_distilbert_class_heaps_and_eval_part1" More Information needed
[ "# Dataset Card for \"embeddings_from_distilbert_class_heaps_and_eval_part1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"embeddings_from_distilbert_class_heaps_and_eval_part1\"\n\nMore Information needed" ]
[ 6, 32 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"embeddings_from_distilbert_class_heaps_and_eval_part1\"\n\nMore Information needed" ]
90ab440fb28104a62910bfd6916a989e0eaf42fc
# Dataset Card for "paragraphss_paraphrasing" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ismailiismail/paragraphss_paraphrasing
[ "region:us" ]
2023-10-07T16:57:56+00:00
{"dataset_info": {"features": [{"name": "phrase", "dtype": "string"}, {"name": "paraphrase", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1848761, "num_examples": 1000}], "download_size": 963985, "dataset_size": 1848761}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-07T18:59:35+00:00
[]
[]
TAGS #region-us
# Dataset Card for "paragraphss_paraphrasing" More Information needed
[ "# Dataset Card for \"paragraphss_paraphrasing\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"paragraphss_paraphrasing\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"paragraphss_paraphrasing\"\n\nMore Information needed" ]
bedd5923d341a06bc48f65f3801ba2a76883f8fe
# Dataset Card for "test_topicbasednli" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
manu/topic_based_nli_test
[ "region:us" ]
2023-10-07T17:11:10+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}, {"split": "valid", "path": "data/valid-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "topic", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "polarity", "dtype": "string"}, {"name": "place_name", "dtype": "string"}, {"name": "industry", "dtype": "string"}, {"name": "rating", "dtype": "int64"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 283400, "num_examples": 600}, {"name": "valid", "num_bytes": 26815, "num_examples": 60}], "download_size": 147426, "dataset_size": 310215}}
2024-01-21T09:55:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for "test_topicbasednli" More Information needed
[ "# Dataset Card for \"test_topicbasednli\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"test_topicbasednli\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"test_topicbasednli\"\n\nMore Information needed" ]
2a3f5043e4891175b211747a584ec305de522238
# Dataset Card for "aesir-test420" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
towhid/aesir-test420
[ "region:us" ]
2023-10-07T17:20:08+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 7240, "num_examples": 17}], "download_size": 6311, "dataset_size": 7240}, "configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}]}
2023-10-07T17:20:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for "aesir-test420" More Information needed
[ "# Dataset Card for \"aesir-test420\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"aesir-test420\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"aesir-test420\"\n\nMore Information needed" ]
21be1afeec7e13f23c2b2baf7cc05d330ddc092d
# Bangumi Image Base of Free! -eternal Summer- This is the image base of bangumi Free! -Eternal Summer-, we detected 24 characters, 2471 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 411 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 274 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 32 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 105 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 215 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 37 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 23 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 45 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 284 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 36 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 54 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 36 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 9 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 238 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 19 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 306 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 11 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 118 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 12 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 14 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 14 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 37 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 5 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | N/A | N/A | N/A | | noise | 136 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/freeeternalsummer
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-07T17:29:01+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-07T18:53:45+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Free! -eternal Summer- ============================================ This is the image base of bangumi Free! -Eternal Summer-, we detected 24 characters, 2471 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
9cf9f86f5d5bb43390cc78dd27ceb185f0bbea73
# Dataset Card for "ncbi_genbank_part_11" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Hack90/ncbi_genbank_part_11
[ "region:us" ]
2023-10-07T17:30:57+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "sequence", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "features", "dtype": "int64"}, {"name": "seq_length", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1891275103, "num_examples": 169329}], "download_size": 841760730, "dataset_size": 1891275103}}
2023-10-07T17:31:48+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ncbi_genbank_part_11" More Information needed
[ "# Dataset Card for \"ncbi_genbank_part_11\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ncbi_genbank_part_11\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ncbi_genbank_part_11\"\n\nMore Information needed" ]
5e3cff23442b9ad8f9780d86142809391f0c70b0
# Dataset Card for "ncbi_genbank_part_73" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Hack90/ncbi_genbank_part_73
[ "region:us" ]
2023-10-07T17:39:17+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "sequence", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "features", "dtype": "int64"}, {"name": "seq_length", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 31446287535, "num_examples": 1129212}], "download_size": 14015101306, "dataset_size": 31446287535}}
2023-10-07T17:51:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ncbi_genbank_part_73" More Information needed
[ "# Dataset Card for \"ncbi_genbank_part_73\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ncbi_genbank_part_73\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ncbi_genbank_part_73\"\n\nMore Information needed" ]
ff6ee0989147eb202d18dd5d7423f2361f02327a
# Bangumi Image Base of Il Sole Penetra Le Illusioni This is the image base of bangumi il sole penetra le illusioni, we detected 26 characters, 1875 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 82 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 144 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 47 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 26 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 23 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 27 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 41 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 18 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 12 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 11 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 73 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 17 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 152 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 11 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 14 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 75 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 12 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 206 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 60 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 16 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 12 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 28 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 53 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 8 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 29 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | noise | 678 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/ilsolepenetraleillusioni
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-07T17:40:21+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-07T18:58:38+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Il Sole Penetra Le Illusioni ================================================== This is the image base of bangumi il sole penetra le illusioni, we detected 26 characters, 1875 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
135f0cdb7821b03ee4991615221f0318f26c17ea
## Dataset Description ### Task Summary Aspect Sentiment Triplet Extraction (ASTE) is the task of extracting the triplets of target entities, their associated sentiment, and opinion spans explaining the reason for the sentiment. This task is firstly proposed by (Peng et al., 2020) in the paper [Knowing What, How and Why: A Near Complete Solution for Aspect-based Sentiment Analysis (In AAAI 2020)](https://arxiv.org/abs/1911.01616). For Example, given the sentence: > The screen is very large and crystal clear with amazing colors and resolution . The objective of the Aspect Sentiment Triplet Extraction (ASTE) task is to predict the triplets: > [('screen', 'large', 'Positive'), ('screen', 'clear', 'Positive'), ('colors', 'amazing', 'Positive'), ('resolution', 'amazing', 'Positive')] where a triplet consists of (target, opinion, sentiment). ### Dataset Summary Sentiment analysis is increasingly viewed as a vital task both from an academic and a commercial standpoint. The majority of current approaches, however, attempt to detect the overall polarity of a sentence, paragraph, or text span, regardless of the entities mentioned (e.g., laptops, restaurants) and their aspects (e.g., battery, screen; food, service). By contrast, this task is concerned with aspect based sentiment analysis (ABSA), where the goal is to identify the aspects of given target entities and the sentiment expressed towards each aspect. This dataset consists of customer reviews with human-authored annotations identifying the mentioned aspects of the target entities and the sentiment polarity of each aspect. ### Dataset Source The ASTE dataset is from the [xuuuluuu/SemEval-Triplet-data](https://github.com/xuuuluuu/SemEval-Triplet-data) repository. It is based on the Sem Eval 2014, 2015 and 2016 datasets, with some preprocessing applied to the text. * [Sem Eval 2014 Task 4](https://alt.qcri.org/semeval2014/task4/) * [Sem Eval 2015 Task 12](https://alt.qcri.org/semeval2015/task12/) * [Sem Eval 2016 Task 5](https://alt.qcri.org/semeval2016/task5/) ### Dataset Details The train, validation and test splits come from the ASTE dataset. There are the following columns: * index The ASTE and Sem Eval datasets had multiple annotations per document. This dataset has a single annotation per row. To make it easier to collect all annotations for a document the index can be used to group them. All annotations for a given document will have the same index. * text This is the document that is annotated, either in the ASTE form or in the Sem Eval form (see below for details). * aspect_start_index The zero based character index for the first letter of the aspect term * aspect_end_index The zero based character index for the last letter of the aspect term * aspect_term The aspect term as it appears in the text * opinion_start_index The zero based character index for the first letter of the opinion term * opinion_end_index The zero based character index for the last letter of the opinion term * opinion_term The opinion term as it appears in the text * sentiment The sentiment class for the opinion about the aspect. One of _negative_, _neutral_ or _positive_. The ASTE dataset involved preprocessing the SemEval text. This preprocessing fixed some of the spelling mistakes, for example: > Keyboard good sized and wasy to use. (easy misspelt as wasy). The preprocessing also includes tokenization of the text and then separating the tokens with whitespace, for example: > It 's just as fast with one program open as it is with sixteen open . Since the added whitespace can lead to unnatrual text I have provided two forms of the dataset. Subsets that end with `aste-v2` have the preprocessed text with spelling correction and additional whitespace. Subsets that end with `sem-eval` have the original Sem Eval text. ### Citation Information ``` @misc{xu2021learning, title={Learning Span-Level Interactions for Aspect Sentiment Triplet Extraction}, author={Lu Xu and Yew Ken Chia and Lidong Bing}, year={2021}, eprint={2107.12214}, archivePrefix={arXiv}, primaryClass={cs.CL} } @misc{xu2021positionaware, title={Position-Aware Tagging for Aspect Sentiment Triplet Extraction}, author={Lu Xu and Hao Li and Wei Lu and Lidong Bing}, year={2021}, eprint={2010.02609}, archivePrefix={arXiv}, primaryClass={cs.CL} } @misc{peng2019knowing, title={Knowing What, How and Why: A Near Complete Solution for Aspect-based Sentiment Analysis}, author={Haiyun Peng and Lu Xu and Lidong Bing and Fei Huang and Wei Lu and Luo Si}, year={2019}, eprint={1911.01616}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
matthewfranglen/aste-v2
[ "task_categories:token-classification", "task_categories:text-classification", "size_categories:1K<n<10K", "language:en", "arxiv:1911.01616", "arxiv:2107.12214", "arxiv:2010.02609", "region:us" ]
2023-10-07T17:45:31+00:00
{"language": ["en"], "size_categories": ["1K<n<10K"], "task_categories": ["token-classification", "text-classification"], "pretty_name": "Aspect Sentiment Triplet Extraction v2", "arxiv": [2107.12214, 2010.02609, 1911.01616], "configs": [{"config_name": "2014-laptop-sem-eval", "data_files": [{"split": "train", "path": "data/2014/laptop/sem-eval/train.gz.parquet"}, {"split": "valid", "path": "data/2014/laptop/sem-eval/valid.gz.parquet"}, {"split": "test", "path": "data/2014/laptop/sem-eval/test.gz.parquet"}]}, {"config_name": "2014-laptop-aste-v2", "data_files": [{"split": "train", "path": "data/2014/laptop/aste/train.gz.parquet"}, {"split": "valid", "path": "data/2014/laptop/aste/valid.gz.parquet"}, {"split": "test", "path": "data/2014/laptop/aste/test.gz.parquet"}]}, {"config_name": "2014-restaurant-sem-eval", "data_files": [{"split": "train", "path": "data/2014/restaurant/sem-eval/train.gz.parquet"}, {"split": "valid", "path": "data/2014/restaurant/sem-eval/valid.gz.parquet"}, {"split": "test", "path": "data/2014/restaurant/sem-eval/test.gz.parquet"}]}, {"config_name": "2014-restaurant-aste-v2", "data_files": [{"split": "train", "path": "data/2014/restaurant/aste/train.gz.parquet"}, {"split": "valid", "path": "data/2014/restaurant/aste/valid.gz.parquet"}, {"split": "test", "path": "data/2014/restaurant/aste/test.gz.parquet"}]}, {"config_name": "2015-restaurant-sem-eval", "data_files": [{"split": "train", "path": "data/2015/restaurant/sem-eval/train.gz.parquet"}, {"split": "valid", "path": "data/2015/restaurant/sem-eval/valid.gz.parquet"}, {"split": "test", "path": "data/2015/restaurant/sem-eval/test.gz.parquet"}]}, {"config_name": "2015-restaurant-aste-v2", "data_files": [{"split": "train", "path": "data/2015/restaurant/aste/train.gz.parquet"}, {"split": "valid", "path": "data/2015/restaurant/aste/valid.gz.parquet"}, {"split": "test", "path": "data/2015/restaurant/aste/test.gz.parquet"}]}, {"config_name": "2016-restaurant-sem-eval", "data_files": [{"split": "train", "path": "data/2016/restaurant/sem-eval/train.gz.parquet"}, {"split": "valid", "path": "data/2016/restaurant/sem-eval/valid.gz.parquet"}, {"split": "test", "path": "data/2016/restaurant/sem-eval/test.gz.parquet"}]}, {"config_name": "2016-restaurant-aste-v2", "data_files": [{"split": "train", "path": "data/2016/restaurant/aste/train.gz.parquet"}, {"split": "valid", "path": "data/2016/restaurant/aste/valid.gz.parquet"}, {"split": "test", "path": "data/2016/restaurant/aste/test.gz.parquet"}]}]}
2023-10-09T09:05:10+00:00
[ "1911.01616", "2107.12214", "2010.02609" ]
[ "en" ]
TAGS #task_categories-token-classification #task_categories-text-classification #size_categories-1K<n<10K #language-English #arxiv-1911.01616 #arxiv-2107.12214 #arxiv-2010.02609 #region-us
## Dataset Description ### Task Summary Aspect Sentiment Triplet Extraction (ASTE) is the task of extracting the triplets of target entities, their associated sentiment, and opinion spans explaining the reason for the sentiment. This task is firstly proposed by (Peng et al., 2020) in the paper Knowing What, How and Why: A Near Complete Solution for Aspect-based Sentiment Analysis (In AAAI 2020). For Example, given the sentence: > The screen is very large and crystal clear with amazing colors and resolution . The objective of the Aspect Sentiment Triplet Extraction (ASTE) task is to predict the triplets: > [('screen', 'large', 'Positive'), ('screen', 'clear', 'Positive'), ('colors', 'amazing', 'Positive'), ('resolution', 'amazing', 'Positive')] where a triplet consists of (target, opinion, sentiment). ### Dataset Summary Sentiment analysis is increasingly viewed as a vital task both from an academic and a commercial standpoint. The majority of current approaches, however, attempt to detect the overall polarity of a sentence, paragraph, or text span, regardless of the entities mentioned (e.g., laptops, restaurants) and their aspects (e.g., battery, screen; food, service). By contrast, this task is concerned with aspect based sentiment analysis (ABSA), where the goal is to identify the aspects of given target entities and the sentiment expressed towards each aspect. This dataset consists of customer reviews with human-authored annotations identifying the mentioned aspects of the target entities and the sentiment polarity of each aspect. ### Dataset Source The ASTE dataset is from the xuuuluuu/SemEval-Triplet-data repository. It is based on the Sem Eval 2014, 2015 and 2016 datasets, with some preprocessing applied to the text. * Sem Eval 2014 Task 4 * Sem Eval 2015 Task 12 * Sem Eval 2016 Task 5 ### Dataset Details The train, validation and test splits come from the ASTE dataset. There are the following columns: * index The ASTE and Sem Eval datasets had multiple annotations per document. This dataset has a single annotation per row. To make it easier to collect all annotations for a document the index can be used to group them. All annotations for a given document will have the same index. * text This is the document that is annotated, either in the ASTE form or in the Sem Eval form (see below for details). * aspect_start_index The zero based character index for the first letter of the aspect term * aspect_end_index The zero based character index for the last letter of the aspect term * aspect_term The aspect term as it appears in the text * opinion_start_index The zero based character index for the first letter of the opinion term * opinion_end_index The zero based character index for the last letter of the opinion term * opinion_term The opinion term as it appears in the text * sentiment The sentiment class for the opinion about the aspect. One of _negative_, _neutral_ or _positive_. The ASTE dataset involved preprocessing the SemEval text. This preprocessing fixed some of the spelling mistakes, for example: > Keyboard good sized and wasy to use. (easy misspelt as wasy). The preprocessing also includes tokenization of the text and then separating the tokens with whitespace, for example: > It 's just as fast with one program open as it is with sixteen open . Since the added whitespace can lead to unnatrual text I have provided two forms of the dataset. Subsets that end with 'aste-v2' have the preprocessed text with spelling correction and additional whitespace. Subsets that end with 'sem-eval' have the original Sem Eval text.
[ "## Dataset Description", "### Task Summary\n\nAspect Sentiment Triplet Extraction (ASTE) is the task of extracting the triplets of target entities, their associated sentiment, and opinion spans explaining the reason for the sentiment.\nThis task is firstly proposed by (Peng et al., 2020) in the paper Knowing What, How and Why: A Near Complete Solution for Aspect-based Sentiment Analysis (In AAAI 2020).\n\nFor Example, given the sentence:\n\n> The screen is very large and crystal clear with amazing colors and resolution .\n\nThe objective of the Aspect Sentiment Triplet Extraction (ASTE) task is to predict the triplets:\n\n> [('screen', 'large', 'Positive'), ('screen', 'clear', 'Positive'), ('colors', 'amazing', 'Positive'), ('resolution', 'amazing', 'Positive')]\n\nwhere a triplet consists of (target, opinion, sentiment).", "### Dataset Summary\n\nSentiment analysis is increasingly viewed as a vital task both from an academic and a commercial standpoint.\nThe majority of current approaches, however, attempt to detect the overall polarity of a sentence, paragraph, or text span, regardless of the entities mentioned (e.g., laptops, restaurants) and their aspects (e.g., battery, screen; food, service).\nBy contrast, this task is concerned with aspect based sentiment analysis (ABSA), where the goal is to identify the aspects of given target entities and the sentiment expressed towards each aspect.\nThis dataset consists of customer reviews with human-authored annotations identifying the mentioned aspects of the target entities and the sentiment polarity of each aspect.", "### Dataset Source\n\nThe ASTE dataset is from the xuuuluuu/SemEval-Triplet-data repository.\n\nIt is based on the Sem Eval 2014, 2015 and 2016 datasets, with some preprocessing applied to the text.\n\n * Sem Eval 2014 Task 4\n * Sem Eval 2015 Task 12\n * Sem Eval 2016 Task 5", "### Dataset Details\n\nThe train, validation and test splits come from the ASTE dataset.\nThere are the following columns:\n\n * index\n The ASTE and Sem Eval datasets had multiple annotations per document.\n This dataset has a single annotation per row.\n To make it easier to collect all annotations for a document the index can be used to group them.\n All annotations for a given document will have the same index.\n\n * text\n This is the document that is annotated, either in the ASTE form or in the Sem Eval form (see below for details).\n\n * aspect_start_index\n The zero based character index for the first letter of the aspect term\n\n * aspect_end_index\n The zero based character index for the last letter of the aspect term\n\n * aspect_term\n The aspect term as it appears in the text\n\n * opinion_start_index\n The zero based character index for the first letter of the opinion term\n\n * opinion_end_index\n The zero based character index for the last letter of the opinion term\n\n * opinion_term\n The opinion term as it appears in the text\n\n * sentiment\n The sentiment class for the opinion about the aspect.\n One of _negative_, _neutral_ or _positive_.\n\nThe ASTE dataset involved preprocessing the SemEval text.\nThis preprocessing fixed some of the spelling mistakes, for example:\n\n> Keyboard good sized and wasy to use.\n\n(easy misspelt as wasy).\n\nThe preprocessing also includes tokenization of the text and then separating the tokens with whitespace, for example:\n\n> It 's just as fast with one program open as it is with sixteen open .\n\nSince the added whitespace can lead to unnatrual text I have provided two forms of the dataset.\nSubsets that end with 'aste-v2' have the preprocessed text with spelling correction and additional whitespace.\nSubsets that end with 'sem-eval' have the original Sem Eval text." ]
[ "TAGS\n#task_categories-token-classification #task_categories-text-classification #size_categories-1K<n<10K #language-English #arxiv-1911.01616 #arxiv-2107.12214 #arxiv-2010.02609 #region-us \n", "## Dataset Description", "### Task Summary\n\nAspect Sentiment Triplet Extraction (ASTE) is the task of extracting the triplets of target entities, their associated sentiment, and opinion spans explaining the reason for the sentiment.\nThis task is firstly proposed by (Peng et al., 2020) in the paper Knowing What, How and Why: A Near Complete Solution for Aspect-based Sentiment Analysis (In AAAI 2020).\n\nFor Example, given the sentence:\n\n> The screen is very large and crystal clear with amazing colors and resolution .\n\nThe objective of the Aspect Sentiment Triplet Extraction (ASTE) task is to predict the triplets:\n\n> [('screen', 'large', 'Positive'), ('screen', 'clear', 'Positive'), ('colors', 'amazing', 'Positive'), ('resolution', 'amazing', 'Positive')]\n\nwhere a triplet consists of (target, opinion, sentiment).", "### Dataset Summary\n\nSentiment analysis is increasingly viewed as a vital task both from an academic and a commercial standpoint.\nThe majority of current approaches, however, attempt to detect the overall polarity of a sentence, paragraph, or text span, regardless of the entities mentioned (e.g., laptops, restaurants) and their aspects (e.g., battery, screen; food, service).\nBy contrast, this task is concerned with aspect based sentiment analysis (ABSA), where the goal is to identify the aspects of given target entities and the sentiment expressed towards each aspect.\nThis dataset consists of customer reviews with human-authored annotations identifying the mentioned aspects of the target entities and the sentiment polarity of each aspect.", "### Dataset Source\n\nThe ASTE dataset is from the xuuuluuu/SemEval-Triplet-data repository.\n\nIt is based on the Sem Eval 2014, 2015 and 2016 datasets, with some preprocessing applied to the text.\n\n * Sem Eval 2014 Task 4\n * Sem Eval 2015 Task 12\n * Sem Eval 2016 Task 5", "### Dataset Details\n\nThe train, validation and test splits come from the ASTE dataset.\nThere are the following columns:\n\n * index\n The ASTE and Sem Eval datasets had multiple annotations per document.\n This dataset has a single annotation per row.\n To make it easier to collect all annotations for a document the index can be used to group them.\n All annotations for a given document will have the same index.\n\n * text\n This is the document that is annotated, either in the ASTE form or in the Sem Eval form (see below for details).\n\n * aspect_start_index\n The zero based character index for the first letter of the aspect term\n\n * aspect_end_index\n The zero based character index for the last letter of the aspect term\n\n * aspect_term\n The aspect term as it appears in the text\n\n * opinion_start_index\n The zero based character index for the first letter of the opinion term\n\n * opinion_end_index\n The zero based character index for the last letter of the opinion term\n\n * opinion_term\n The opinion term as it appears in the text\n\n * sentiment\n The sentiment class for the opinion about the aspect.\n One of _negative_, _neutral_ or _positive_.\n\nThe ASTE dataset involved preprocessing the SemEval text.\nThis preprocessing fixed some of the spelling mistakes, for example:\n\n> Keyboard good sized and wasy to use.\n\n(easy misspelt as wasy).\n\nThe preprocessing also includes tokenization of the text and then separating the tokens with whitespace, for example:\n\n> It 's just as fast with one program open as it is with sixteen open .\n\nSince the added whitespace can lead to unnatrual text I have provided two forms of the dataset.\nSubsets that end with 'aste-v2' have the preprocessed text with spelling correction and additional whitespace.\nSubsets that end with 'sem-eval' have the original Sem Eval text." ]
[ 70, 4, 223, 164, 81, 429 ]
[ "passage: TAGS\n#task_categories-token-classification #task_categories-text-classification #size_categories-1K<n<10K #language-English #arxiv-1911.01616 #arxiv-2107.12214 #arxiv-2010.02609 #region-us \n## Dataset Description### Task Summary\n\nAspect Sentiment Triplet Extraction (ASTE) is the task of extracting the triplets of target entities, their associated sentiment, and opinion spans explaining the reason for the sentiment.\nThis task is firstly proposed by (Peng et al., 2020) in the paper Knowing What, How and Why: A Near Complete Solution for Aspect-based Sentiment Analysis (In AAAI 2020).\n\nFor Example, given the sentence:\n\n> The screen is very large and crystal clear with amazing colors and resolution .\n\nThe objective of the Aspect Sentiment Triplet Extraction (ASTE) task is to predict the triplets:\n\n> [('screen', 'large', 'Positive'), ('screen', 'clear', 'Positive'), ('colors', 'amazing', 'Positive'), ('resolution', 'amazing', 'Positive')]\n\nwhere a triplet consists of (target, opinion, sentiment).### Dataset Summary\n\nSentiment analysis is increasingly viewed as a vital task both from an academic and a commercial standpoint.\nThe majority of current approaches, however, attempt to detect the overall polarity of a sentence, paragraph, or text span, regardless of the entities mentioned (e.g., laptops, restaurants) and their aspects (e.g., battery, screen; food, service).\nBy contrast, this task is concerned with aspect based sentiment analysis (ABSA), where the goal is to identify the aspects of given target entities and the sentiment expressed towards each aspect.\nThis dataset consists of customer reviews with human-authored annotations identifying the mentioned aspects of the target entities and the sentiment polarity of each aspect." ]
64a4ee0630134df3e5eb990ae50d17a2df49d337
# Dataset Card for "tri-edu-date" Left: 3429 rows - 0.09% [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
H4438/tri-edu-date
[ "region:us" ]
2023-10-07T18:05:11+00:00
{"dataset_info": {"features": [{"name": "title", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "dates", "sequence": "string"}, {"name": "body", "dtype": "string"}, {"name": "head", "dtype": "string"}, {"name": "est_date", "dtype": "string"}, {"name": "ext_dates", "sequence": "string"}, {"name": "flt_dates", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 214580613, "num_examples": 37239}], "download_size": 0, "dataset_size": 214580613}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-08T17:14:26+00:00
[]
[]
TAGS #region-us
# Dataset Card for "tri-edu-date" Left: 3429 rows - 0.09% More Information needed
[ "# Dataset Card for \"tri-edu-date\"\nLeft: 3429 rows - 0.09%\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"tri-edu-date\"\nLeft: 3429 rows - 0.09%\n\nMore Information needed" ]
[ 6, 26 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"tri-edu-date\"\nLeft: 3429 rows - 0.09%\n\nMore Information needed" ]
2c16765da54640e7a63a6ad52a8c1ca6609589ba
# Dataset Card for "ncbi_genbank_part_46" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Hack90/ncbi_genbank_part_46
[ "region:us" ]
2023-10-07T18:05:52+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "sequence", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "features", "dtype": "int64"}, {"name": "seq_length", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 45473431595, "num_examples": 198370}], "download_size": 20050383599, "dataset_size": 45473431595}}
2023-10-07T18:46:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ncbi_genbank_part_46" More Information needed
[ "# Dataset Card for \"ncbi_genbank_part_46\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ncbi_genbank_part_46\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ncbi_genbank_part_46\"\n\nMore Information needed" ]
52ad3aef881c29180b932b4bacae87c98622ca66
# Dataset Card for "ncbi_genbank_part_74" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Hack90/ncbi_genbank_part_74
[ "region:us" ]
2023-10-07T18:09:37+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "sequence", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "features", "dtype": "int64"}, {"name": "seq_length", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 33100376103, "num_examples": 414925}], "download_size": 14899366001, "dataset_size": 33100376103}}
2023-10-07T18:24:02+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ncbi_genbank_part_74" More Information needed
[ "# Dataset Card for \"ncbi_genbank_part_74\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ncbi_genbank_part_74\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ncbi_genbank_part_74\"\n\nMore Information needed" ]
495f24c45b368c82caf8b0076fece4a2fb491a91
# Dataset Card for "colorization_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ummagumm-a/colorization_dataset
[ "region:us" ]
2023-10-07T18:13:33+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "conditioning_image", "sequence": {"sequence": {"sequence": "uint8"}}}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 333261193.0, "num_examples": 1000}], "download_size": 127051514, "dataset_size": 333261193.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-07T18:22:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for "colorization_dataset" More Information needed
[ "# Dataset Card for \"colorization_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"colorization_dataset\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"colorization_dataset\"\n\nMore Information needed" ]
5185f04f445f4b86898107567d845b299f1ebb5b
# Dataset Card for "Mixed-Arabic-Dataset-Main-Test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
M-A-D/Mixed-Arabic-Dataset-Main-Test
[ "region:us" ]
2023-10-07T18:17:51+00:00
{"dataset_info": {"features": [{"name": "GenId", "dtype": "int64"}, {"name": "SubId", "dtype": "int64"}, {"name": "DatasetName", "dtype": "string"}, {"name": "DatasetLink", "dtype": "string"}, {"name": "Text", "dtype": "string"}, {"name": "MetaData", "struct": [{"name": "AboutAuthor", "dtype": "null"}, {"name": "AboutBook", "dtype": "null"}, {"name": "Author", "dtype": "null"}, {"name": "AuthorName", "dtype": "null"}, {"name": "BookLink", "dtype": "null"}, {"name": "BookName", "dtype": "null"}, {"name": "ChapterLink", "dtype": "null"}, {"name": "ChapterName", "dtype": "null"}, {"name": "Tags", "dtype": "null"}, {"name": "__index_level_0__", "dtype": "float64"}, {"name": "created_date", "dtype": "string"}, {"name": "deleted", "dtype": "bool"}, {"name": "detoxify", "dtype": "null"}, {"name": "emojis", "struct": [{"name": "count", "sequence": "int32"}, {"name": "name", "sequence": "string"}]}, {"name": "id", "dtype": "string"}, {"name": "labels", "struct": [{"name": "count", "sequence": "int32"}, {"name": "name", "sequence": "string"}, {"name": "value", "sequence": "float64"}]}, {"name": "lang", "dtype": "string"}, {"name": "message_id", "dtype": "string"}, {"name": "message_tree_id", "dtype": "string"}, {"name": "model_name", "dtype": "null"}, {"name": "parent_id", "dtype": "string"}, {"name": "query_id", "dtype": "null"}, {"name": "rank", "dtype": "float64"}, {"name": "review_count", "dtype": "float64"}, {"name": "review_result", "dtype": "bool"}, {"name": "role", "dtype": "string"}, {"name": "synthetic", "dtype": "bool"}, {"name": "title", "dtype": "null"}, {"name": "tree_state", "dtype": "string"}, {"name": "url", "dtype": "null"}, {"name": "user_id", "dtype": "string"}]}, {"name": "ConcatenatedText", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 96491917, "num_examples": 71935}], "download_size": 37192033, "dataset_size": 96491917}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-07T18:17:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Mixed-Arabic-Dataset-Main-Test" More Information needed
[ "# Dataset Card for \"Mixed-Arabic-Dataset-Main-Test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Mixed-Arabic-Dataset-Main-Test\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Mixed-Arabic-Dataset-Main-Test\"\n\nMore Information needed" ]
a26579d66f958f35019cf4fba9d1d37e6cb8cfb9
# Bangumi Image Base of Zombie Land Saga Revenge This is the image base of bangumi Zombie Land Saga Revenge, we detected 36 characters, 2401 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 127 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 86 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 40 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 80 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 18 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 12 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 61 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 60 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 35 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 40 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 61 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 58 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 31 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 43 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 22 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 10 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 13 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 5 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | N/A | N/A | N/A | | 18 | 217 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 46 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 229 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 40 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 87 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 18 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 20 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 57 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 21 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 13 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 196 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 49 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) | | 30 | 30 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) | | 31 | 92 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) | | 32 | 184 | [Download](32/dataset.zip) | ![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 3](32/preview_3.png) | ![preview 4](32/preview_4.png) | ![preview 5](32/preview_5.png) | ![preview 6](32/preview_6.png) | ![preview 7](32/preview_7.png) | ![preview 8](32/preview_8.png) | | 33 | 8 | [Download](33/dataset.zip) | ![preview 1](33/preview_1.png) | ![preview 2](33/preview_2.png) | ![preview 3](33/preview_3.png) | ![preview 4](33/preview_4.png) | ![preview 5](33/preview_5.png) | ![preview 6](33/preview_6.png) | ![preview 7](33/preview_7.png) | ![preview 8](33/preview_8.png) | | 34 | 8 | [Download](34/dataset.zip) | ![preview 1](34/preview_1.png) | ![preview 2](34/preview_2.png) | ![preview 3](34/preview_3.png) | ![preview 4](34/preview_4.png) | ![preview 5](34/preview_5.png) | ![preview 6](34/preview_6.png) | ![preview 7](34/preview_7.png) | ![preview 8](34/preview_8.png) | | noise | 284 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/zombielandsagarevenge
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-07T18:19:50+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-07T19:45:00+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Zombie Land Saga Revenge ============================================== This is the image base of bangumi Zombie Land Saga Revenge, we detected 36 characters, 2401 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
d0b6302a52b46b4fe199cdf0fe3a5deb9ea93c2a
Whether God Is Composed of Matter and Form? Objection 1: It seems that God is composed of matter and form. For whatever has a soul is composed of matter and form; since the soul is the form of the body. But Scripture attributes a soul to God; for it is mentioned in Hebrews (Heb. 10:38), where God says: "But My just man liveth by faith; but if he withdraw himself, he shall not please My soul." Therefore God is composed of matter and form. Objection 2: Further, anger, joy and the like are passions of the composite. But these are attributed to God in Scripture: "The Lord was exceeding angry with His people" (Ps. 105:40). Therefore God is composed of matter and form. Objection 3: Further, matter is the principle of individualization. But God seems to be individual, for He cannot be predicated of many. Therefore He is composed of matter and form. Contrary: Whatever is composed of matter and form is a body; for dimensive quantity is the first property of matter. But God is not a body as proved in the preceding Article; therefore He is not composed of matter and form. Response: It is impossible that matter should exist in God. First, because matter is in potentiality. But we have shown (Q. 2, A. 3) that God is pure act, without any potentiality. Hence it is impossible that God should be composed of matter and form. Secondly, because everything composed of matter and form owes its perfection and goodness to its form; therefore its goodness is participated, inasmuch as matter participates the form. Now the first good and the best--viz. God--is not a participated good, because the essential good is prior to the participated good. Hence it is impossible that God should be composed of matter and form. Thirdly, because every agent acts by its form; hence the manner in which it has its form is the manner in which it is an agent. Therefore whatever is primarily and essentially an agent must be primarily and essentially form. Now God is the first agent, since He is the first efficient cause. He is therefore of His essence a form; and not composed of matter and form. Reply Objection 1: A soul is attributed to God because His acts resemble the acts of a soul; for, that we will anything, is due to our soul. Hence what is pleasing to His will is said to be pleasing to His soul. Reply Objection 2: Anger and the like are attributed to God on account of a similitude of effect. Thus, because to punish is properly the act of an angry man, God's punishment is metaphorically spoken of as His anger. Reply Objection 3: Forms which can be received in matter are individualized by matter, which cannot be in another as in a subject since it is the first underlying subject; although form of itself, unless something else prevents it, can be received by many. But that form which cannot be received in matter, but is self-subsisting, is individualized precisely because it cannot be received in a subject; and such a form is God. Hence it does not follow that matter exists in God. _______________________
ccore/rhetoric-saint-thomas-aquinas
[ "license:mit", "region:us" ]
2023-10-07T18:22:48+00:00
{"license": "mit"}
2023-10-07T18:25:07+00:00
[]
[]
TAGS #license-mit #region-us
Whether God Is Composed of Matter and Form? Objection 1: It seems that God is composed of matter and form. For whatever has a soul is composed of matter and form; since the soul is the form of the body. But Scripture attributes a soul to God; for it is mentioned in Hebrews (Heb. 10:38), where God says: "But My just man liveth by faith; but if he withdraw himself, he shall not please My soul." Therefore God is composed of matter and form. Objection 2: Further, anger, joy and the like are passions of the composite. But these are attributed to God in Scripture: "The Lord was exceeding angry with His people" (Ps. 105:40). Therefore God is composed of matter and form. Objection 3: Further, matter is the principle of individualization. But God seems to be individual, for He cannot be predicated of many. Therefore He is composed of matter and form. Contrary: Whatever is composed of matter and form is a body; for dimensive quantity is the first property of matter. But God is not a body as proved in the preceding Article; therefore He is not composed of matter and form. Response: It is impossible that matter should exist in God. First, because matter is in potentiality. But we have shown (Q. 2, A. 3) that God is pure act, without any potentiality. Hence it is impossible that God should be composed of matter and form. Secondly, because everything composed of matter and form owes its perfection and goodness to its form; therefore its goodness is participated, inasmuch as matter participates the form. Now the first good and the best--viz. God--is not a participated good, because the essential good is prior to the participated good. Hence it is impossible that God should be composed of matter and form. Thirdly, because every agent acts by its form; hence the manner in which it has its form is the manner in which it is an agent. Therefore whatever is primarily and essentially an agent must be primarily and essentially form. Now God is the first agent, since He is the first efficient cause. He is therefore of His essence a form; and not composed of matter and form. Reply Objection 1: A soul is attributed to God because His acts resemble the acts of a soul; for, that we will anything, is due to our soul. Hence what is pleasing to His will is said to be pleasing to His soul. Reply Objection 2: Anger and the like are attributed to God on account of a similitude of effect. Thus, because to punish is properly the act of an angry man, God's punishment is metaphorically spoken of as His anger. Reply Objection 3: Forms which can be received in matter are individualized by matter, which cannot be in another as in a subject since it is the first underlying subject; although form of itself, unless something else prevents it, can be received by many. But that form which cannot be received in matter, but is self-subsisting, is individualized precisely because it cannot be received in a subject; and such a form is God. Hence it does not follow that matter exists in God. _______________________
[]
[ "TAGS\n#license-mit #region-us \n" ]
[ 11 ]
[ "passage: TAGS\n#license-mit #region-us \n" ]
343256532bf8033bb356e87ee28410a582009015
# Dataset Card for "colorization_dataset2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ummagumm-a/colorization_dataset2
[ "region:us" ]
2023-10-07T18:24:51+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "conditioning_image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 109496781.0, "num_examples": 1000}], "download_size": 109498139, "dataset_size": 109496781.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-07T18:28:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "colorization_dataset2" More Information needed
[ "# Dataset Card for \"colorization_dataset2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"colorization_dataset2\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"colorization_dataset2\"\n\nMore Information needed" ]
f86b33c67732ce19f812235c23e9c48dfab6a389
# Bangumi Image Base of Sekai Saikou No Ansatsusha, Isekai Kizoku Ni Tensei Suru This is the image base of bangumi Sekai Saikou no Ansatsusha, Isekai Kizoku ni Tensei Suru, we detected 32 characters, 1510 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 118 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 40 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 27 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 23 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 17 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 20 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 270 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 9 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 98 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 91 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 20 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 27 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 29 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 23 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 16 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 86 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 11 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 15 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 13 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 14 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 16 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 10 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 6 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | N/A | N/A | | 23 | 39 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 150 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 38 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 70 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 15 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 10 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 11 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) | | 30 | 9 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) | | noise | 169 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/sekaisaikounoansatsushaisekaikizokunitenseisuru
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-07T18:30:47+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-07T19:43:37+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Sekai Saikou No Ansatsusha, Isekai Kizoku Ni Tensei Suru ============================================================================== This is the image base of bangumi Sekai Saikou no Ansatsusha, Isekai Kizoku ni Tensei Suru, we detected 32 characters, 1510 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
f1fcb0b6ef893fadf3fcf8d89fed542057dd1129
# Habitat v0.3.x Episode Datasets and Checkpoints Episode datasets for Social Navigation and Social Rearrangement tasks. The training dataset has 37k episodes and the evaluation dataset has 1.2k episodes. In addition, we released a social nav checkpoint trained based on the above episodes. Please read here for more detail: https://github.com/facebookresearch/habitat-lab/tree/main/habitat-baselines # License Notes: HSSD assets and episodes are provided under cc-by-nc license as a subset of the dataset described here: https://3dlg-hcvc.github.io/hssd/
ai-habitat/hab3_episodes
[ "license:cc-by-nc-4.0", "region:us" ]
2023-10-07T18:36:55+00:00
{"license": "cc-by-nc-4.0", "viewer": false}
2023-11-28T20:31:21+00:00
[]
[]
TAGS #license-cc-by-nc-4.0 #region-us
# Habitat v0.3.x Episode Datasets and Checkpoints Episode datasets for Social Navigation and Social Rearrangement tasks. The training dataset has 37k episodes and the evaluation dataset has 1.2k episodes. In addition, we released a social nav checkpoint trained based on the above episodes. Please read here for more detail: URL # License Notes: HSSD assets and episodes are provided under cc-by-nc license as a subset of the dataset described here: URL
[ "# Habitat v0.3.x Episode Datasets and Checkpoints\n\nEpisode datasets for Social Navigation and Social Rearrangement tasks. The training dataset has 37k episodes and the evaluation dataset has 1.2k episodes.\n\nIn addition, we released a social nav checkpoint trained based on the above episodes. Please read here for more detail: URL", "# License Notes:\n\nHSSD assets and episodes are provided under cc-by-nc license as a subset of the dataset described here: URL" ]
[ "TAGS\n#license-cc-by-nc-4.0 #region-us \n", "# Habitat v0.3.x Episode Datasets and Checkpoints\n\nEpisode datasets for Social Navigation and Social Rearrangement tasks. The training dataset has 37k episodes and the evaluation dataset has 1.2k episodes.\n\nIn addition, we released a social nav checkpoint trained based on the above episodes. Please read here for more detail: URL", "# License Notes:\n\nHSSD assets and episodes are provided under cc-by-nc license as a subset of the dataset described here: URL" ]
[ 17, 76, 33 ]
[ "passage: TAGS\n#license-cc-by-nc-4.0 #region-us \n# Habitat v0.3.x Episode Datasets and Checkpoints\n\nEpisode datasets for Social Navigation and Social Rearrangement tasks. The training dataset has 37k episodes and the evaluation dataset has 1.2k episodes.\n\nIn addition, we released a social nav checkpoint trained based on the above episodes. Please read here for more detail: URL# License Notes:\n\nHSSD assets and episodes are provided under cc-by-nc license as a subset of the dataset described here: URL" ]
eaf5963dcb11e2beb1a10070c01e6a19af4bda70
# Dataset Card for "ncbi_genbank_part_75" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Hack90/ncbi_genbank_part_75
[ "region:us" ]
2023-10-07T18:39:00+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "sequence", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "features", "dtype": "int64"}, {"name": "seq_length", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 35009212242, "num_examples": 74649}], "download_size": 15493347795, "dataset_size": 35009212242}}
2023-10-07T18:53:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ncbi_genbank_part_75" More Information needed
[ "# Dataset Card for \"ncbi_genbank_part_75\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ncbi_genbank_part_75\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ncbi_genbank_part_75\"\n\nMore Information needed" ]
a37a74e3231bd663f21aaf6dea61832c46577a3a
# Dataset Card for "catholic_model_v2_dataset_20231007_194934" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tr416/catholic_model_v2_dataset_20231007_194934
[ "region:us" ]
2023-10-07T18:49:34+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 760128.0, "num_examples": 296}, {"name": "test", "num_bytes": 7704.0, "num_examples": 3}], "download_size": 52253, "dataset_size": 767832.0}}
2023-10-07T18:49:35+00:00
[]
[]
TAGS #region-us
# Dataset Card for "catholic_model_v2_dataset_20231007_194934" More Information needed
[ "# Dataset Card for \"catholic_model_v2_dataset_20231007_194934\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"catholic_model_v2_dataset_20231007_194934\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"catholic_model_v2_dataset_20231007_194934\"\n\nMore Information needed" ]
cfb7ed8828c7b6d7d7ee9869c0bdbf3d3f91b17c
# Dataset Card for "catholic_model_v3_dataset_20231007_195201" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tr416/catholic_model_v3_dataset_20231007_195201
[ "region:us" ]
2023-10-07T18:52:01+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 760128.0, "num_examples": 296}, {"name": "test", "num_bytes": 7704.0, "num_examples": 3}], "download_size": 52212, "dataset_size": 767832.0}}
2023-10-07T18:52:02+00:00
[]
[]
TAGS #region-us
# Dataset Card for "catholic_model_v3_dataset_20231007_195201" More Information needed
[ "# Dataset Card for \"catholic_model_v3_dataset_20231007_195201\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"catholic_model_v3_dataset_20231007_195201\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"catholic_model_v3_dataset_20231007_195201\"\n\nMore Information needed" ]
0fc4de903083fd294ef73fd4ec9667f25f191194
This is the 'Floyd' text adventure dataset converted to a chat format with system messages. The system messages were randomly constructed from a table of phrases and templates. The original data can be found in the .7z archive. **Credits:** Thank you to VE Forbryderne from KoboldAI for scraping the dataset.
PocketDoc/Floyd-Text-Adventures
[ "task_categories:conversational", "language:en", "not-for-all-audiences", "region:us" ]
2023-10-07T19:02:05+00:00
{"language": ["en"], "task_categories": ["conversational"], "pretty_name": "Floyd Text Adventures", "tags": ["not-for-all-audiences"]}
2023-10-14T22:37:07+00:00
[]
[ "en" ]
TAGS #task_categories-conversational #language-English #not-for-all-audiences #region-us
This is the 'Floyd' text adventure dataset converted to a chat format with system messages. The system messages were randomly constructed from a table of phrases and templates. The original data can be found in the .7z archive. Credits: Thank you to VE Forbryderne from KoboldAI for scraping the dataset.
[]
[ "TAGS\n#task_categories-conversational #language-English #not-for-all-audiences #region-us \n" ]
[ 29 ]
[ "passage: TAGS\n#task_categories-conversational #language-English #not-for-all-audiences #region-us \n" ]
bbff86e16d3d865d5a2a37bd72c5b3573e804f01
This is the 'CYS' text adventure dataset converted to a chat format with system messages. The system messages were randomly constructed from a table of phrases and templates. The original data can be found in the .7z archive. **Credits:** Thank you to VE Forbryderne from KoboldAI for scraping the dataset.
PocketDoc/Choose-Your-Story-Long-Text-Adventures
[ "task_categories:conversational", "language:en", "not-for-all-audiences", "region:us" ]
2023-10-07T19:04:56+00:00
{"language": ["en"], "task_categories": ["conversational"], "pretty_name": "Choose Your Story Novel Format Text Adventures", "tags": ["not-for-all-audiences"]}
2023-10-16T03:39:05+00:00
[]
[ "en" ]
TAGS #task_categories-conversational #language-English #not-for-all-audiences #region-us
This is the 'CYS' text adventure dataset converted to a chat format with system messages. The system messages were randomly constructed from a table of phrases and templates. The original data can be found in the .7z archive. Credits: Thank you to VE Forbryderne from KoboldAI for scraping the dataset.
[]
[ "TAGS\n#task_categories-conversational #language-English #not-for-all-audiences #region-us \n" ]
[ 29 ]
[ "passage: TAGS\n#task_categories-conversational #language-English #not-for-all-audiences #region-us \n" ]
45ed398c91e868f7e0d9a79b10c84c1e831236fd
# Dataset Card for "ncbi_genbank_part_76" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Hack90/ncbi_genbank_part_76
[ "region:us" ]
2023-10-07T19:08:38+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "sequence", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "features", "dtype": "int64"}, {"name": "seq_length", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 31427190646, "num_examples": 832959}], "download_size": 13887863083, "dataset_size": 31427190646}}
2023-10-07T19:21:23+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ncbi_genbank_part_76" More Information needed
[ "# Dataset Card for \"ncbi_genbank_part_76\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ncbi_genbank_part_76\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ncbi_genbank_part_76\"\n\nMore Information needed" ]
d556f6d02f7d66e05cb3a22f304dffbae69cedd1
# Dataset Card for Dataset Name <!-- Provide a quick summary of the dataset. --> .csv файл датасета. Он состоит из " вопрос - ответ. Он был сделан в стиле Хатсуне Мику, она любит Пашу. так же она помощница, помогает в ЛС людям с вопросами по игре Highrise ## Dataset Details ### Dataset Description
MikuHH/gghh
[ "task_categories:question-answering", "size_categories:n<1K", "language:ru", "region:us" ]
2023-10-07T19:09:42+00:00
{"language": ["ru"], "size_categories": ["n<1K"], "task_categories": ["question-answering"]}
2023-10-21T07:13:20+00:00
[]
[ "ru" ]
TAGS #task_categories-question-answering #size_categories-n<1K #language-Russian #region-us
# Dataset Card for Dataset Name .csv файл датасета. Он состоит из " вопрос - ответ. Он был сделан в стиле Хатсуне Мику, она любит Пашу. так же она помощница, помогает в ЛС людям с вопросами по игре Highrise ## Dataset Details ### Dataset Description
[ "# Dataset Card for Dataset Name\n\n\n\n.csv файл датасета. Он состоит из \" вопрос - ответ.\nОн был сделан в стиле Хатсуне Мику, она любит Пашу. так же она помощница, помогает в ЛС людям с вопросами по игре Highrise", "## Dataset Details", "### Dataset Description" ]
[ "TAGS\n#task_categories-question-answering #size_categories-n<1K #language-Russian #region-us \n", "# Dataset Card for Dataset Name\n\n\n\n.csv файл датасета. Он состоит из \" вопрос - ответ.\nОн был сделан в стиле Хатсуне Мику, она любит Пашу. так же она помощница, помогает в ЛС людям с вопросами по игре Highrise", "## Dataset Details", "### Dataset Description" ]
[ 33, 61, 4, 5 ]
[ "passage: TAGS\n#task_categories-question-answering #size_categories-n<1K #language-Russian #region-us \n# Dataset Card for Dataset Name\n\n\n\n.csv файл датасета. Он состоит из \" вопрос - ответ.\nОн был сделан в стиле Хатсуне Мику, она любит Пашу. так же она помощница, помогает в ЛС людям с вопросами по игре Highrise## Dataset Details### Dataset Description" ]
d7db6ef1f9d7be2a563f2461b6ebe60d8bdcffe5
# Bangumi Image Base of Ishuzoku Reviewers This is the image base of bangumi Ishuzoku Reviewers, we detected 37 characters, 1196 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:---------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------|:-------------------------------| | 0 | 148 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 25 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 24 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 11 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 12 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 8 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 201 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 9 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 15 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 9 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 6 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | N/A | N/A | | 11 | 14 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 202 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 18 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 7 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | N/A | | 15 | 19 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 11 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 7 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | N/A | | 18 | 59 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 11 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 9 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 7 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | N/A | | 22 | 49 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 14 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 11 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 13 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 9 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 7 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | N/A | | 28 | 9 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 7 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | N/A | | 30 | 6 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | N/A | N/A | | 31 | 6 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | N/A | N/A | | 32 | 5 | [Download](32/dataset.zip) | ![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 3](32/preview_3.png) | ![preview 4](32/preview_4.png) | ![preview 5](32/preview_5.png) | N/A | N/A | N/A | | 33 | 21 | [Download](33/dataset.zip) | ![preview 1](33/preview_1.png) | ![preview 2](33/preview_2.png) | ![preview 3](33/preview_3.png) | ![preview 4](33/preview_4.png) | ![preview 5](33/preview_5.png) | ![preview 6](33/preview_6.png) | ![preview 7](33/preview_7.png) | ![preview 8](33/preview_8.png) | | 34 | 7 | [Download](34/dataset.zip) | ![preview 1](34/preview_1.png) | ![preview 2](34/preview_2.png) | ![preview 3](34/preview_3.png) | ![preview 4](34/preview_4.png) | ![preview 5](34/preview_5.png) | ![preview 6](34/preview_6.png) | ![preview 7](34/preview_7.png) | N/A | | 35 | 5 | [Download](35/dataset.zip) | ![preview 1](35/preview_1.png) | ![preview 2](35/preview_2.png) | ![preview 3](35/preview_3.png) | ![preview 4](35/preview_4.png) | ![preview 5](35/preview_5.png) | N/A | N/A | N/A | | noise | 195 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/ishuzokureviewers
[ "size_categories:1K<n<10K", "license:mit", "art", "region:us" ]
2023-10-07T19:14:37+00:00
{"license": "mit", "size_categories": ["1K<n<10K"], "tags": ["art"]}
2023-10-07T20:23:49+00:00
[]
[]
TAGS #size_categories-1K<n<10K #license-mit #art #region-us
Bangumi Image Base of Ishuzoku Reviewers ======================================== This is the image base of bangumi Ishuzoku Reviewers, we detected 37 characters, 1196 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #license-mit #art #region-us \n" ]
7ca1eb4c2a66f3c8d58390ef7022c18ada623ae5
# Dataset Card for "ncbi_genbank_part_77" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Hack90/ncbi_genbank_part_77
[ "region:us" ]
2023-10-07T19:35:16+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "sequence", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "features", "dtype": "int64"}, {"name": "seq_length", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 29897565069, "num_examples": 1177983}], "download_size": 13158660518, "dataset_size": 29897565069}}
2023-10-07T19:46:41+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ncbi_genbank_part_77" More Information needed
[ "# Dataset Card for \"ncbi_genbank_part_77\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ncbi_genbank_part_77\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ncbi_genbank_part_77\"\n\nMore Information needed" ]
04688d127f730301b7e206b0ec3ba81ebcec83f6
# Dataset Card for "catholic_v3_dataset_20231007_212725" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tr416/catholic_v3_dataset_20231007_212725
[ "region:us" ]
2023-10-07T20:27:25+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 760128.0, "num_examples": 296}, {"name": "test", "num_bytes": 7704.0, "num_examples": 3}], "download_size": 52202, "dataset_size": 767832.0}}
2023-10-07T20:27:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "catholic_v3_dataset_20231007_212725" More Information needed
[ "# Dataset Card for \"catholic_v3_dataset_20231007_212725\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"catholic_v3_dataset_20231007_212725\"\n\nMore Information needed" ]
[ 6, 27 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"catholic_v3_dataset_20231007_212725\"\n\nMore Information needed" ]
25cf154a0b5d32045242d4b931b50d0da29b556a
# Dataset Card for "eli5_dataset_title_text" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Safeer143/eli5_dataset_title_text
[ "region:us" ]
2023-10-07T21:15:03+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1224245207, "num_examples": 1442904}], "download_size": 0, "dataset_size": 1224245207}}
2023-10-18T09:56:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "eli5_dataset_title_text" More Information needed
[ "# Dataset Card for \"eli5_dataset_title_text\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"eli5_dataset_title_text\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"eli5_dataset_title_text\"\n\nMore Information needed" ]
c4efa6059fcf851f082fa1d88d648bd3d1a8374a
# Dataset Card for "ziq-depression_tweet-en" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Intuit-GenSRF/ziq-depression-tweet-es
[ "region:us" ]
2023-10-07T21:25:26+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "labels", "sequence": "string"}, {"name": "processed_text", "sequence": "string"}, {"name": "num_tokens", "dtype": "int64"}, {"name": "text_en", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 51261868, "num_examples": 51132}], "download_size": 32137564, "dataset_size": 51261868}}
2023-10-07T21:25:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ziq-depression_tweet-en" More Information needed
[ "# Dataset Card for \"ziq-depression_tweet-en\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ziq-depression_tweet-en\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ziq-depression_tweet-en\"\n\nMore Information needed" ]
b05527daad2b1304c7cc51e2a78c0dd9fca9d5e5
# Dataset Card for "English-to-Moroccan-Darija" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
BounharAbdelaziz/English-to-Moroccan-Darija
[ "task_categories:translation", "size_categories:10K<n<100K", "language:ar", "region:us" ]
2023-10-07T22:48:46+00:00
{"language": ["ar"], "size_categories": ["10K<n<100K"], "task_categories": ["translation"], "dataset_info": {"features": [{"name": "english", "dtype": "string"}, {"name": "darija", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 636610, "num_examples": 10062}], "download_size": 447249, "dataset_size": 636610}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-07T22:51:00+00:00
[]
[ "ar" ]
TAGS #task_categories-translation #size_categories-10K<n<100K #language-Arabic #region-us
# Dataset Card for "English-to-Moroccan-Darija" More Information needed
[ "# Dataset Card for \"English-to-Moroccan-Darija\"\n\nMore Information needed" ]
[ "TAGS\n#task_categories-translation #size_categories-10K<n<100K #language-Arabic #region-us \n", "# Dataset Card for \"English-to-Moroccan-Darija\"\n\nMore Information needed" ]
[ 32, 20 ]
[ "passage: TAGS\n#task_categories-translation #size_categories-10K<n<100K #language-Arabic #region-us \n# Dataset Card for \"English-to-Moroccan-Darija\"\n\nMore Information needed" ]
c92734c456f24faa9868bacdf8f9ff3b775cfa20
# Dataset Card for "dataset4sentinement_HSE" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
marcus2000/dataset4sentinement_HSE
[ "region:us" ]
2023-10-07T23:13:47+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "labels", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 3679508.0480941418, "num_examples": 3322}, {"name": "test", "num_bytes": 650171.9519058582, "num_examples": 587}], "download_size": 2311435, "dataset_size": 4329680.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]}
2023-10-07T23:39:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dataset4sentinement_HSE" More Information needed
[ "# Dataset Card for \"dataset4sentinement_HSE\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dataset4sentinement_HSE\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"dataset4sentinement_HSE\"\n\nMore Information needed" ]
2714b0127903268ac1790a27033ae4f6d141a208
# Dataset Card for "20231007_chai_prize_model_feedback_all" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Jellywibble/20231007_chai_prize_model_feedback_all
[ "region:us" ]
2023-10-07T23:13:48+00:00
{"dataset_info": {"features": [{"name": "conversation_id", "dtype": "string"}, {"name": "bot_id", "dtype": "string"}, {"name": "user_id", "dtype": "string"}, {"name": "conversation", "dtype": "string"}, {"name": "thumbs_up", "dtype": "bool"}, {"name": "feedback", "dtype": "string"}, {"name": "model_name", "dtype": "string"}, {"name": "season", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 242533107, "num_examples": 124233}], "download_size": 127593487, "dataset_size": 242533107}}
2023-10-07T23:14:05+00:00
[]
[]
TAGS #region-us
# Dataset Card for "20231007_chai_prize_model_feedback_all" More Information needed
[ "# Dataset Card for \"20231007_chai_prize_model_feedback_all\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"20231007_chai_prize_model_feedback_all\"\n\nMore Information needed" ]
[ 6, 26 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"20231007_chai_prize_model_feedback_all\"\n\nMore Information needed" ]
0860e0244e24c40f06b1bc2c6931222e03875776
# Dataset Card for "science_qa" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
KonstantyM/science_qa
[ "region:us" ]
2023-10-07T23:19:43+00:00
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 7497499873, "num_examples": 4432703}], "download_size": 4282191598, "dataset_size": 7497499873}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-07T23:23:32+00:00
[]
[]
TAGS #region-us
# Dataset Card for "science_qa" More Information needed
[ "# Dataset Card for \"science_qa\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"science_qa\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"science_qa\"\n\nMore Information needed" ]
286aa552320989e7e81f25f62004cd9dddcdffbe
# Dataset Card for "v2_dataset_20231008_002216" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tr416/v2_dataset_20231008_002216
[ "region:us" ]
2023-10-07T23:22:16+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 75203880.0, "num_examples": 29285}, {"name": "test", "num_bytes": 760128.0, "num_examples": 296}], "download_size": 12799490, "dataset_size": 75964008.0}}
2023-10-07T23:22:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "v2_dataset_20231008_002216" More Information needed
[ "# Dataset Card for \"v2_dataset_20231008_002216\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"v2_dataset_20231008_002216\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"v2_dataset_20231008_002216\"\n\nMore Information needed" ]
3f925351c11fb5513c950068096b74924c96f742
# Dataset Card for "v2_dataset_20231008_002613" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tr416/v2_dataset_20231008_002613
[ "region:us" ]
2023-10-07T23:26:13+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 75203880.0, "num_examples": 29285}, {"name": "test", "num_bytes": 760128.0, "num_examples": 296}], "download_size": 12818386, "dataset_size": 75964008.0}}
2023-10-07T23:26:15+00:00
[]
[]
TAGS #region-us
# Dataset Card for "v2_dataset_20231008_002613" More Information needed
[ "# Dataset Card for \"v2_dataset_20231008_002613\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"v2_dataset_20231008_002613\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"v2_dataset_20231008_002613\"\n\nMore Information needed" ]
6557a1eb1fd5d747e8b64750eacbc9a9dcd878a3
# Dataset Card for "v2_dataset_20231008_002916" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tr416/v2_dataset_20231008_002916
[ "region:us" ]
2023-10-07T23:29:24+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 75203880.0, "num_examples": 29285}, {"name": "test", "num_bytes": 760128.0, "num_examples": 296}], "download_size": 12811954, "dataset_size": 75964008.0}}
2023-10-07T23:29:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "v2_dataset_20231008_002916" More Information needed
[ "# Dataset Card for \"v2_dataset_20231008_002916\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"v2_dataset_20231008_002916\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"v2_dataset_20231008_002916\"\n\nMore Information needed" ]
8b55bdcbce8ad4b95bff2c02631c313c827e167b
# Dataset Card for "v2_dataset_20231008_003113" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tr416/v2_dataset_20231008_003113
[ "region:us" ]
2023-10-07T23:31:13+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 75203880.0, "num_examples": 29285}, {"name": "test", "num_bytes": 760128.0, "num_examples": 296}], "download_size": 12796324, "dataset_size": 75964008.0}}
2023-10-07T23:31:15+00:00
[]
[]
TAGS #region-us
# Dataset Card for "v2_dataset_20231008_003113" More Information needed
[ "# Dataset Card for \"v2_dataset_20231008_003113\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"v2_dataset_20231008_003113\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"v2_dataset_20231008_003113\"\n\nMore Information needed" ]
afdc29da8046417af915b50774ba47e82a7394d8
# Dataset Card for "v2_dataset_20231008_003227" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tr416/v2_dataset_20231008_003227
[ "region:us" ]
2023-10-07T23:32:27+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 75203880.0, "num_examples": 29285}, {"name": "test", "num_bytes": 760128.0, "num_examples": 296}], "download_size": 12825566, "dataset_size": 75964008.0}}
2023-10-07T23:32:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "v2_dataset_20231008_003227" More Information needed
[ "# Dataset Card for \"v2_dataset_20231008_003227\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"v2_dataset_20231008_003227\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"v2_dataset_20231008_003227\"\n\nMore Information needed" ]
6d1a61c51f651dac97f3a4343a6f0e291d9d820c
# Dataset Card for "1ColDedupedRefDatasetWMetricFinal" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Fraol/1ColDedupedRefDatasetWMetricFinal
[ "region:us" ]
2023-10-08T00:42:07+00:00
{"dataset_info": {"features": [{"name": "source", "dtype": "string"}, {"name": "path_name", "dtype": "string"}, {"name": "file_name", "dtype": "string"}, {"name": "ref_type", "dtype": "string"}, {"name": "hash", "dtype": "string"}, {"name": "class_name", "dtype": "string"}, {"name": "method_name", "dtype": "string"}, {"name": "row_number", "dtype": "int64"}, {"name": "cbo", "dtype": "float64"}, {"name": "wmc", "dtype": "float64"}, {"name": "lcom*", "dtype": "float64"}, {"name": "loc", "dtype": "float64"}, {"name": "source_after", "dtype": "string"}, {"name": "cbo_after", "dtype": "float64"}, {"name": "wmc_after", "dtype": "float64"}, {"name": "lcom*_after", "dtype": "float64"}, {"name": "loc_after", "dtype": "float64"}, {"name": "issue_name", "dtype": "string"}, {"name": "issue_localize", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 476226598, "num_examples": 37325}], "download_size": 0, "dataset_size": 476226598}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-08T02:23:35+00:00
[]
[]
TAGS #region-us
# Dataset Card for "1ColDedupedRefDatasetWMetricFinal" More Information needed
[ "# Dataset Card for \"1ColDedupedRefDatasetWMetricFinal\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"1ColDedupedRefDatasetWMetricFinal\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"1ColDedupedRefDatasetWMetricFinal\"\n\nMore Information needed" ]
87f2153638926edba1c903b9bc675ef018a4cc8d
[[FFN paper]](https://www.nature.com/articles/s41592-018-0049-4), [[data from the paper]](https://storage.googleapis.com/j0126-nature-methods-data/GgwKmcKgrcoNxJccKuGIzRnQqfit9hnfK1ctZzNbnuU/README.txt) We downloaded and processed the data into formats that are easier to share. - Ground truth - train: `j0126-train-34vol.zip`, 34 densely labeled subvolumes - validaiton: `valid_12_skeletons.h5`, manually traced skeletons for 12 neurons - test: `test_50_skeletons.h5`, manually traced skeletons for 50 neurons - FFN result (due to the file size constraint, we split the zip into two files) ``` # first, merge the two parts together zip -s 0 ffn_seg_part.zip --out ffn_seg.zip rm ffn_seg_part* # then, unzip the segmentation in tiles of the size 128x2048x2048 (saved in h5) unzip ffn_seg.zip ```
pytc/zebrafinch-j0126
[ "license:mit", "biology", "region:us" ]
2023-10-08T01:09:05+00:00
{"license": "mit", "tags": ["biology"]}
2024-01-08T00:38:27+00:00
[]
[]
TAGS #license-mit #biology #region-us
[[FFN paper]](URL [[data from the paper]](URL We downloaded and processed the data into formats that are easier to share. - Ground truth - train: 'URL', 34 densely labeled subvolumes - validaiton: 'valid_12_skeletons.h5', manually traced skeletons for 12 neurons - test: 'test_50_skeletons.h5', manually traced skeletons for 50 neurons - FFN result (due to the file size constraint, we split the zip into two files)
[]
[ "TAGS\n#license-mit #biology #region-us \n" ]
[ 14 ]
[ "passage: TAGS\n#license-mit #biology #region-us \n" ]
54f4c93ace5cf6ed443293837c488c0d602dbadc
# Dataset Card for "qa_processed" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
pbaoo2705/qa_processed
[ "region:us" ]
2023-10-08T01:17:01+00:00
{"dataset_info": {"features": [{"name": "pubid", "dtype": "int32"}, {"name": "question", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "long_answer", "dtype": "string"}, {"name": "final_decision", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "answers", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9675160, "num_examples": 5000}], "download_size": 5668568, "dataset_size": 9675160}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-08T01:17:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "qa_processed" More Information needed
[ "# Dataset Card for \"qa_processed\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"qa_processed\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"qa_processed\"\n\nMore Information needed" ]
b882caa0cd5f47d7283c1726919b0447f910375e
# Dataset Card for "qa_processed_eval" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
pbaoo2705/qa_processed_eval
[ "region:us" ]
2023-10-08T01:17:04+00:00
{"dataset_info": {"features": [{"name": "pubid", "dtype": "int32"}, {"name": "question", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "long_answer", "dtype": "string"}, {"name": "final_decision", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "answers", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 1930174, "num_examples": 1000}], "download_size": 1132172, "dataset_size": 1930174}, "configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}]}]}
2023-10-08T01:17:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "qa_processed_eval" More Information needed
[ "# Dataset Card for \"qa_processed_eval\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"qa_processed_eval\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"qa_processed_eval\"\n\nMore Information needed" ]
e0cc5b57ad9b7137918dd991634b3aa969799be0
# Dataset Card for "tiny-codes-alpaca" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
layoric/tiny-codes-alpaca
[ "region:us" ]
2023-10-08T01:25:40+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "main_topic", "dtype": "string"}, {"name": "subtopic", "dtype": "string"}, {"name": "adjective", "dtype": "string"}, {"name": "action_verb", "dtype": "string"}, {"name": "scenario", "dtype": "string"}, {"name": "target_audience", "dtype": "string"}, {"name": "programming_language", "dtype": "string"}, {"name": "common_sense_topic", "dtype": "string"}, {"name": "idx", "dtype": "int64"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3795436393, "num_examples": 1632309}], "download_size": 1642754203, "dataset_size": 3795436393}}
2023-10-08T01:28:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "tiny-codes-alpaca" More Information needed
[ "# Dataset Card for \"tiny-codes-alpaca\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"tiny-codes-alpaca\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"tiny-codes-alpaca\"\n\nMore Information needed" ]
e79872cd87f4605de97e495c8ebefdb30ebf5b59
# Dataset Card for "30000_christian_non_denominational_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
AiForTheChurch/30000_christian_non_denominational_dataset
[ "region:us" ]
2023-10-08T01:28:45+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "user", "dtype": "string"}, {"name": "llm", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 29503223, "num_examples": 29581}], "download_size": 15020646, "dataset_size": 29503223}}
2023-10-08T01:28:47+00:00
[]
[]
TAGS #region-us
# Dataset Card for "30000_christian_non_denominational_dataset" More Information needed
[ "# Dataset Card for \"30000_christian_non_denominational_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"30000_christian_non_denominational_dataset\"\n\nMore Information needed" ]
[ 6, 25 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"30000_christian_non_denominational_dataset\"\n\nMore Information needed" ]
df5a5c17df819184dbdd859d601359669c590075
# Dataset Card for "tiny-codes-alpaca-csharp" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
layoric/tiny-codes-alpaca-csharp
[ "region:us" ]
2023-10-08T01:33:48+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "main_topic", "dtype": "string"}, {"name": "subtopic", "dtype": "string"}, {"name": "adjective", "dtype": "string"}, {"name": "action_verb", "dtype": "string"}, {"name": "scenario", "dtype": "string"}, {"name": "target_audience", "dtype": "string"}, {"name": "programming_language", "dtype": "string"}, {"name": "common_sense_topic", "dtype": "string"}, {"name": "idx", "dtype": "int64"}, {"name": "output", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 326727978, "num_examples": 125478}], "download_size": 126103184, "dataset_size": 326727978}}
2023-10-08T01:45:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "tiny-codes-alpaca-csharp" More Information needed
[ "# Dataset Card for \"tiny-codes-alpaca-csharp\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"tiny-codes-alpaca-csharp\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"tiny-codes-alpaca-csharp\"\n\nMore Information needed" ]
de54c27d37474f2ad7c346f5fd26d1cfd36a4a2a
# Dataset Card for "catholic_denomination_300" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
AiForTheChurch/catholic_denomination_300
[ "region:us" ]
2023-10-08T01:46:19+00:00
{"dataset_info": {"features": [{"name": "user", "dtype": "string"}, {"name": "llm", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 172156, "num_examples": 300}], "download_size": 91806, "dataset_size": 172156}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-08T01:46:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "catholic_denomination_300" More Information needed
[ "# Dataset Card for \"catholic_denomination_300\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"catholic_denomination_300\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"catholic_denomination_300\"\n\nMore Information needed" ]
fd84aaa38d909d9cfc872cc67decfb12e1ca9109
# GamePhysics Dataset (Daily Dump)
asgaardlab/GamePhysicsDailyDump
[ "task_categories:video-classification", "language:en", "license:mit", "game", "game-physics", "game-bug", "video-understanding", "region:us" ]
2023-10-08T02:05:20+00:00
{"language": ["en"], "license": "mit", "task_categories": ["video-classification"], "pretty_name": "GamePhysics", "tags": ["game", "game-physics", "game-bug", "video-understanding"]}
2024-02-15T07:07:33+00:00
[]
[ "en" ]
TAGS #task_categories-video-classification #language-English #license-mit #game #game-physics #game-bug #video-understanding #region-us
# GamePhysics Dataset (Daily Dump)
[ "# GamePhysics Dataset (Daily Dump)" ]
[ "TAGS\n#task_categories-video-classification #language-English #license-mit #game #game-physics #game-bug #video-understanding #region-us \n", "# GamePhysics Dataset (Daily Dump)" ]
[ 43, 12 ]
[ "passage: TAGS\n#task_categories-video-classification #language-English #license-mit #game #game-physics #game-bug #video-understanding #region-us \n# GamePhysics Dataset (Daily Dump)" ]
1ad4c2eaa86924847aae6db6d276eb9f3754d685
# Bangumi Image Base of Naruto Shippuden This is the image base of bangumi Naruto Shippuden, we detected 196 characters, 36722 images in total. The full dataset is [here](all.zip). **Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual.** If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview: | # | Images | Download | Preview 1 | Preview 2 | Preview 3 | Preview 4 | Preview 5 | Preview 6 | Preview 7 | Preview 8 | |:------|---------:|:----------------------------|:--------------------------------|:--------------------------------|:--------------------------------|:--------------------------------|:--------------------------------|:--------------------------------|:--------------------------------|:--------------------------------| | 0 | 2958 | [Download](0/dataset.zip) | ![preview 1](0/preview_1.png) | ![preview 2](0/preview_2.png) | ![preview 3](0/preview_3.png) | ![preview 4](0/preview_4.png) | ![preview 5](0/preview_5.png) | ![preview 6](0/preview_6.png) | ![preview 7](0/preview_7.png) | ![preview 8](0/preview_8.png) | | 1 | 726 | [Download](1/dataset.zip) | ![preview 1](1/preview_1.png) | ![preview 2](1/preview_2.png) | ![preview 3](1/preview_3.png) | ![preview 4](1/preview_4.png) | ![preview 5](1/preview_5.png) | ![preview 6](1/preview_6.png) | ![preview 7](1/preview_7.png) | ![preview 8](1/preview_8.png) | | 2 | 1111 | [Download](2/dataset.zip) | ![preview 1](2/preview_1.png) | ![preview 2](2/preview_2.png) | ![preview 3](2/preview_3.png) | ![preview 4](2/preview_4.png) | ![preview 5](2/preview_5.png) | ![preview 6](2/preview_6.png) | ![preview 7](2/preview_7.png) | ![preview 8](2/preview_8.png) | | 3 | 442 | [Download](3/dataset.zip) | ![preview 1](3/preview_1.png) | ![preview 2](3/preview_2.png) | ![preview 3](3/preview_3.png) | ![preview 4](3/preview_4.png) | ![preview 5](3/preview_5.png) | ![preview 6](3/preview_6.png) | ![preview 7](3/preview_7.png) | ![preview 8](3/preview_8.png) | | 4 | 132 | [Download](4/dataset.zip) | ![preview 1](4/preview_1.png) | ![preview 2](4/preview_2.png) | ![preview 3](4/preview_3.png) | ![preview 4](4/preview_4.png) | ![preview 5](4/preview_5.png) | ![preview 6](4/preview_6.png) | ![preview 7](4/preview_7.png) | ![preview 8](4/preview_8.png) | | 5 | 1913 | [Download](5/dataset.zip) | ![preview 1](5/preview_1.png) | ![preview 2](5/preview_2.png) | ![preview 3](5/preview_3.png) | ![preview 4](5/preview_4.png) | ![preview 5](5/preview_5.png) | ![preview 6](5/preview_6.png) | ![preview 7](5/preview_7.png) | ![preview 8](5/preview_8.png) | | 6 | 80 | [Download](6/dataset.zip) | ![preview 1](6/preview_1.png) | ![preview 2](6/preview_2.png) | ![preview 3](6/preview_3.png) | ![preview 4](6/preview_4.png) | ![preview 5](6/preview_5.png) | ![preview 6](6/preview_6.png) | ![preview 7](6/preview_7.png) | ![preview 8](6/preview_8.png) | | 7 | 719 | [Download](7/dataset.zip) | ![preview 1](7/preview_1.png) | ![preview 2](7/preview_2.png) | ![preview 3](7/preview_3.png) | ![preview 4](7/preview_4.png) | ![preview 5](7/preview_5.png) | ![preview 6](7/preview_6.png) | ![preview 7](7/preview_7.png) | ![preview 8](7/preview_8.png) | | 8 | 7149 | [Download](8/dataset.zip) | ![preview 1](8/preview_1.png) | ![preview 2](8/preview_2.png) | ![preview 3](8/preview_3.png) | ![preview 4](8/preview_4.png) | ![preview 5](8/preview_5.png) | ![preview 6](8/preview_6.png) | ![preview 7](8/preview_7.png) | ![preview 8](8/preview_8.png) | | 9 | 71 | [Download](9/dataset.zip) | ![preview 1](9/preview_1.png) | ![preview 2](9/preview_2.png) | ![preview 3](9/preview_3.png) | ![preview 4](9/preview_4.png) | ![preview 5](9/preview_5.png) | ![preview 6](9/preview_6.png) | ![preview 7](9/preview_7.png) | ![preview 8](9/preview_8.png) | | 10 | 946 | [Download](10/dataset.zip) | ![preview 1](10/preview_1.png) | ![preview 2](10/preview_2.png) | ![preview 3](10/preview_3.png) | ![preview 4](10/preview_4.png) | ![preview 5](10/preview_5.png) | ![preview 6](10/preview_6.png) | ![preview 7](10/preview_7.png) | ![preview 8](10/preview_8.png) | | 11 | 159 | [Download](11/dataset.zip) | ![preview 1](11/preview_1.png) | ![preview 2](11/preview_2.png) | ![preview 3](11/preview_3.png) | ![preview 4](11/preview_4.png) | ![preview 5](11/preview_5.png) | ![preview 6](11/preview_6.png) | ![preview 7](11/preview_7.png) | ![preview 8](11/preview_8.png) | | 12 | 1667 | [Download](12/dataset.zip) | ![preview 1](12/preview_1.png) | ![preview 2](12/preview_2.png) | ![preview 3](12/preview_3.png) | ![preview 4](12/preview_4.png) | ![preview 5](12/preview_5.png) | ![preview 6](12/preview_6.png) | ![preview 7](12/preview_7.png) | ![preview 8](12/preview_8.png) | | 13 | 109 | [Download](13/dataset.zip) | ![preview 1](13/preview_1.png) | ![preview 2](13/preview_2.png) | ![preview 3](13/preview_3.png) | ![preview 4](13/preview_4.png) | ![preview 5](13/preview_5.png) | ![preview 6](13/preview_6.png) | ![preview 7](13/preview_7.png) | ![preview 8](13/preview_8.png) | | 14 | 158 | [Download](14/dataset.zip) | ![preview 1](14/preview_1.png) | ![preview 2](14/preview_2.png) | ![preview 3](14/preview_3.png) | ![preview 4](14/preview_4.png) | ![preview 5](14/preview_5.png) | ![preview 6](14/preview_6.png) | ![preview 7](14/preview_7.png) | ![preview 8](14/preview_8.png) | | 15 | 94 | [Download](15/dataset.zip) | ![preview 1](15/preview_1.png) | ![preview 2](15/preview_2.png) | ![preview 3](15/preview_3.png) | ![preview 4](15/preview_4.png) | ![preview 5](15/preview_5.png) | ![preview 6](15/preview_6.png) | ![preview 7](15/preview_7.png) | ![preview 8](15/preview_8.png) | | 16 | 1473 | [Download](16/dataset.zip) | ![preview 1](16/preview_1.png) | ![preview 2](16/preview_2.png) | ![preview 3](16/preview_3.png) | ![preview 4](16/preview_4.png) | ![preview 5](16/preview_5.png) | ![preview 6](16/preview_6.png) | ![preview 7](16/preview_7.png) | ![preview 8](16/preview_8.png) | | 17 | 1392 | [Download](17/dataset.zip) | ![preview 1](17/preview_1.png) | ![preview 2](17/preview_2.png) | ![preview 3](17/preview_3.png) | ![preview 4](17/preview_4.png) | ![preview 5](17/preview_5.png) | ![preview 6](17/preview_6.png) | ![preview 7](17/preview_7.png) | ![preview 8](17/preview_8.png) | | 18 | 88 | [Download](18/dataset.zip) | ![preview 1](18/preview_1.png) | ![preview 2](18/preview_2.png) | ![preview 3](18/preview_3.png) | ![preview 4](18/preview_4.png) | ![preview 5](18/preview_5.png) | ![preview 6](18/preview_6.png) | ![preview 7](18/preview_7.png) | ![preview 8](18/preview_8.png) | | 19 | 70 | [Download](19/dataset.zip) | ![preview 1](19/preview_1.png) | ![preview 2](19/preview_2.png) | ![preview 3](19/preview_3.png) | ![preview 4](19/preview_4.png) | ![preview 5](19/preview_5.png) | ![preview 6](19/preview_6.png) | ![preview 7](19/preview_7.png) | ![preview 8](19/preview_8.png) | | 20 | 333 | [Download](20/dataset.zip) | ![preview 1](20/preview_1.png) | ![preview 2](20/preview_2.png) | ![preview 3](20/preview_3.png) | ![preview 4](20/preview_4.png) | ![preview 5](20/preview_5.png) | ![preview 6](20/preview_6.png) | ![preview 7](20/preview_7.png) | ![preview 8](20/preview_8.png) | | 21 | 178 | [Download](21/dataset.zip) | ![preview 1](21/preview_1.png) | ![preview 2](21/preview_2.png) | ![preview 3](21/preview_3.png) | ![preview 4](21/preview_4.png) | ![preview 5](21/preview_5.png) | ![preview 6](21/preview_6.png) | ![preview 7](21/preview_7.png) | ![preview 8](21/preview_8.png) | | 22 | 628 | [Download](22/dataset.zip) | ![preview 1](22/preview_1.png) | ![preview 2](22/preview_2.png) | ![preview 3](22/preview_3.png) | ![preview 4](22/preview_4.png) | ![preview 5](22/preview_5.png) | ![preview 6](22/preview_6.png) | ![preview 7](22/preview_7.png) | ![preview 8](22/preview_8.png) | | 23 | 139 | [Download](23/dataset.zip) | ![preview 1](23/preview_1.png) | ![preview 2](23/preview_2.png) | ![preview 3](23/preview_3.png) | ![preview 4](23/preview_4.png) | ![preview 5](23/preview_5.png) | ![preview 6](23/preview_6.png) | ![preview 7](23/preview_7.png) | ![preview 8](23/preview_8.png) | | 24 | 418 | [Download](24/dataset.zip) | ![preview 1](24/preview_1.png) | ![preview 2](24/preview_2.png) | ![preview 3](24/preview_3.png) | ![preview 4](24/preview_4.png) | ![preview 5](24/preview_5.png) | ![preview 6](24/preview_6.png) | ![preview 7](24/preview_7.png) | ![preview 8](24/preview_8.png) | | 25 | 1193 | [Download](25/dataset.zip) | ![preview 1](25/preview_1.png) | ![preview 2](25/preview_2.png) | ![preview 3](25/preview_3.png) | ![preview 4](25/preview_4.png) | ![preview 5](25/preview_5.png) | ![preview 6](25/preview_6.png) | ![preview 7](25/preview_7.png) | ![preview 8](25/preview_8.png) | | 26 | 287 | [Download](26/dataset.zip) | ![preview 1](26/preview_1.png) | ![preview 2](26/preview_2.png) | ![preview 3](26/preview_3.png) | ![preview 4](26/preview_4.png) | ![preview 5](26/preview_5.png) | ![preview 6](26/preview_6.png) | ![preview 7](26/preview_7.png) | ![preview 8](26/preview_8.png) | | 27 | 142 | [Download](27/dataset.zip) | ![preview 1](27/preview_1.png) | ![preview 2](27/preview_2.png) | ![preview 3](27/preview_3.png) | ![preview 4](27/preview_4.png) | ![preview 5](27/preview_5.png) | ![preview 6](27/preview_6.png) | ![preview 7](27/preview_7.png) | ![preview 8](27/preview_8.png) | | 28 | 45 | [Download](28/dataset.zip) | ![preview 1](28/preview_1.png) | ![preview 2](28/preview_2.png) | ![preview 3](28/preview_3.png) | ![preview 4](28/preview_4.png) | ![preview 5](28/preview_5.png) | ![preview 6](28/preview_6.png) | ![preview 7](28/preview_7.png) | ![preview 8](28/preview_8.png) | | 29 | 49 | [Download](29/dataset.zip) | ![preview 1](29/preview_1.png) | ![preview 2](29/preview_2.png) | ![preview 3](29/preview_3.png) | ![preview 4](29/preview_4.png) | ![preview 5](29/preview_5.png) | ![preview 6](29/preview_6.png) | ![preview 7](29/preview_7.png) | ![preview 8](29/preview_8.png) | | 30 | 356 | [Download](30/dataset.zip) | ![preview 1](30/preview_1.png) | ![preview 2](30/preview_2.png) | ![preview 3](30/preview_3.png) | ![preview 4](30/preview_4.png) | ![preview 5](30/preview_5.png) | ![preview 6](30/preview_6.png) | ![preview 7](30/preview_7.png) | ![preview 8](30/preview_8.png) | | 31 | 172 | [Download](31/dataset.zip) | ![preview 1](31/preview_1.png) | ![preview 2](31/preview_2.png) | ![preview 3](31/preview_3.png) | ![preview 4](31/preview_4.png) | ![preview 5](31/preview_5.png) | ![preview 6](31/preview_6.png) | ![preview 7](31/preview_7.png) | ![preview 8](31/preview_8.png) | | 32 | 85 | [Download](32/dataset.zip) | ![preview 1](32/preview_1.png) | ![preview 2](32/preview_2.png) | ![preview 3](32/preview_3.png) | ![preview 4](32/preview_4.png) | ![preview 5](32/preview_5.png) | ![preview 6](32/preview_6.png) | ![preview 7](32/preview_7.png) | ![preview 8](32/preview_8.png) | | 33 | 122 | [Download](33/dataset.zip) | ![preview 1](33/preview_1.png) | ![preview 2](33/preview_2.png) | ![preview 3](33/preview_3.png) | ![preview 4](33/preview_4.png) | ![preview 5](33/preview_5.png) | ![preview 6](33/preview_6.png) | ![preview 7](33/preview_7.png) | ![preview 8](33/preview_8.png) | | 34 | 292 | [Download](34/dataset.zip) | ![preview 1](34/preview_1.png) | ![preview 2](34/preview_2.png) | ![preview 3](34/preview_3.png) | ![preview 4](34/preview_4.png) | ![preview 5](34/preview_5.png) | ![preview 6](34/preview_6.png) | ![preview 7](34/preview_7.png) | ![preview 8](34/preview_8.png) | | 35 | 115 | [Download](35/dataset.zip) | ![preview 1](35/preview_1.png) | ![preview 2](35/preview_2.png) | ![preview 3](35/preview_3.png) | ![preview 4](35/preview_4.png) | ![preview 5](35/preview_5.png) | ![preview 6](35/preview_6.png) | ![preview 7](35/preview_7.png) | ![preview 8](35/preview_8.png) | | 36 | 103 | [Download](36/dataset.zip) | ![preview 1](36/preview_1.png) | ![preview 2](36/preview_2.png) | ![preview 3](36/preview_3.png) | ![preview 4](36/preview_4.png) | ![preview 5](36/preview_5.png) | ![preview 6](36/preview_6.png) | ![preview 7](36/preview_7.png) | ![preview 8](36/preview_8.png) | | 37 | 96 | [Download](37/dataset.zip) | ![preview 1](37/preview_1.png) | ![preview 2](37/preview_2.png) | ![preview 3](37/preview_3.png) | ![preview 4](37/preview_4.png) | ![preview 5](37/preview_5.png) | ![preview 6](37/preview_6.png) | ![preview 7](37/preview_7.png) | ![preview 8](37/preview_8.png) | | 38 | 190 | [Download](38/dataset.zip) | ![preview 1](38/preview_1.png) | ![preview 2](38/preview_2.png) | ![preview 3](38/preview_3.png) | ![preview 4](38/preview_4.png) | ![preview 5](38/preview_5.png) | ![preview 6](38/preview_6.png) | ![preview 7](38/preview_7.png) | ![preview 8](38/preview_8.png) | | 39 | 49 | [Download](39/dataset.zip) | ![preview 1](39/preview_1.png) | ![preview 2](39/preview_2.png) | ![preview 3](39/preview_3.png) | ![preview 4](39/preview_4.png) | ![preview 5](39/preview_5.png) | ![preview 6](39/preview_6.png) | ![preview 7](39/preview_7.png) | ![preview 8](39/preview_8.png) | | 40 | 22 | [Download](40/dataset.zip) | ![preview 1](40/preview_1.png) | ![preview 2](40/preview_2.png) | ![preview 3](40/preview_3.png) | ![preview 4](40/preview_4.png) | ![preview 5](40/preview_5.png) | ![preview 6](40/preview_6.png) | ![preview 7](40/preview_7.png) | ![preview 8](40/preview_8.png) | | 41 | 65 | [Download](41/dataset.zip) | ![preview 1](41/preview_1.png) | ![preview 2](41/preview_2.png) | ![preview 3](41/preview_3.png) | ![preview 4](41/preview_4.png) | ![preview 5](41/preview_5.png) | ![preview 6](41/preview_6.png) | ![preview 7](41/preview_7.png) | ![preview 8](41/preview_8.png) | | 42 | 643 | [Download](42/dataset.zip) | ![preview 1](42/preview_1.png) | ![preview 2](42/preview_2.png) | ![preview 3](42/preview_3.png) | ![preview 4](42/preview_4.png) | ![preview 5](42/preview_5.png) | ![preview 6](42/preview_6.png) | ![preview 7](42/preview_7.png) | ![preview 8](42/preview_8.png) | | 43 | 59 | [Download](43/dataset.zip) | ![preview 1](43/preview_1.png) | ![preview 2](43/preview_2.png) | ![preview 3](43/preview_3.png) | ![preview 4](43/preview_4.png) | ![preview 5](43/preview_5.png) | ![preview 6](43/preview_6.png) | ![preview 7](43/preview_7.png) | ![preview 8](43/preview_8.png) | | 44 | 162 | [Download](44/dataset.zip) | ![preview 1](44/preview_1.png) | ![preview 2](44/preview_2.png) | ![preview 3](44/preview_3.png) | ![preview 4](44/preview_4.png) | ![preview 5](44/preview_5.png) | ![preview 6](44/preview_6.png) | ![preview 7](44/preview_7.png) | ![preview 8](44/preview_8.png) | | 45 | 347 | [Download](45/dataset.zip) | ![preview 1](45/preview_1.png) | ![preview 2](45/preview_2.png) | ![preview 3](45/preview_3.png) | ![preview 4](45/preview_4.png) | ![preview 5](45/preview_5.png) | ![preview 6](45/preview_6.png) | ![preview 7](45/preview_7.png) | ![preview 8](45/preview_8.png) | | 46 | 55 | [Download](46/dataset.zip) | ![preview 1](46/preview_1.png) | ![preview 2](46/preview_2.png) | ![preview 3](46/preview_3.png) | ![preview 4](46/preview_4.png) | ![preview 5](46/preview_5.png) | ![preview 6](46/preview_6.png) | ![preview 7](46/preview_7.png) | ![preview 8](46/preview_8.png) | | 47 | 122 | [Download](47/dataset.zip) | ![preview 1](47/preview_1.png) | ![preview 2](47/preview_2.png) | ![preview 3](47/preview_3.png) | ![preview 4](47/preview_4.png) | ![preview 5](47/preview_5.png) | ![preview 6](47/preview_6.png) | ![preview 7](47/preview_7.png) | ![preview 8](47/preview_8.png) | | 48 | 45 | [Download](48/dataset.zip) | ![preview 1](48/preview_1.png) | ![preview 2](48/preview_2.png) | ![preview 3](48/preview_3.png) | ![preview 4](48/preview_4.png) | ![preview 5](48/preview_5.png) | ![preview 6](48/preview_6.png) | ![preview 7](48/preview_7.png) | ![preview 8](48/preview_8.png) | | 49 | 179 | [Download](49/dataset.zip) | ![preview 1](49/preview_1.png) | ![preview 2](49/preview_2.png) | ![preview 3](49/preview_3.png) | ![preview 4](49/preview_4.png) | ![preview 5](49/preview_5.png) | ![preview 6](49/preview_6.png) | ![preview 7](49/preview_7.png) | ![preview 8](49/preview_8.png) | | 50 | 68 | [Download](50/dataset.zip) | ![preview 1](50/preview_1.png) | ![preview 2](50/preview_2.png) | ![preview 3](50/preview_3.png) | ![preview 4](50/preview_4.png) | ![preview 5](50/preview_5.png) | ![preview 6](50/preview_6.png) | ![preview 7](50/preview_7.png) | ![preview 8](50/preview_8.png) | | 51 | 88 | [Download](51/dataset.zip) | ![preview 1](51/preview_1.png) | ![preview 2](51/preview_2.png) | ![preview 3](51/preview_3.png) | ![preview 4](51/preview_4.png) | ![preview 5](51/preview_5.png) | ![preview 6](51/preview_6.png) | ![preview 7](51/preview_7.png) | ![preview 8](51/preview_8.png) | | 52 | 32 | [Download](52/dataset.zip) | ![preview 1](52/preview_1.png) | ![preview 2](52/preview_2.png) | ![preview 3](52/preview_3.png) | ![preview 4](52/preview_4.png) | ![preview 5](52/preview_5.png) | ![preview 6](52/preview_6.png) | ![preview 7](52/preview_7.png) | ![preview 8](52/preview_8.png) | | 53 | 33 | [Download](53/dataset.zip) | ![preview 1](53/preview_1.png) | ![preview 2](53/preview_2.png) | ![preview 3](53/preview_3.png) | ![preview 4](53/preview_4.png) | ![preview 5](53/preview_5.png) | ![preview 6](53/preview_6.png) | ![preview 7](53/preview_7.png) | ![preview 8](53/preview_8.png) | | 54 | 148 | [Download](54/dataset.zip) | ![preview 1](54/preview_1.png) | ![preview 2](54/preview_2.png) | ![preview 3](54/preview_3.png) | ![preview 4](54/preview_4.png) | ![preview 5](54/preview_5.png) | ![preview 6](54/preview_6.png) | ![preview 7](54/preview_7.png) | ![preview 8](54/preview_8.png) | | 55 | 228 | [Download](55/dataset.zip) | ![preview 1](55/preview_1.png) | ![preview 2](55/preview_2.png) | ![preview 3](55/preview_3.png) | ![preview 4](55/preview_4.png) | ![preview 5](55/preview_5.png) | ![preview 6](55/preview_6.png) | ![preview 7](55/preview_7.png) | ![preview 8](55/preview_8.png) | | 56 | 170 | [Download](56/dataset.zip) | ![preview 1](56/preview_1.png) | ![preview 2](56/preview_2.png) | ![preview 3](56/preview_3.png) | ![preview 4](56/preview_4.png) | ![preview 5](56/preview_5.png) | ![preview 6](56/preview_6.png) | ![preview 7](56/preview_7.png) | ![preview 8](56/preview_8.png) | | 57 | 112 | [Download](57/dataset.zip) | ![preview 1](57/preview_1.png) | ![preview 2](57/preview_2.png) | ![preview 3](57/preview_3.png) | ![preview 4](57/preview_4.png) | ![preview 5](57/preview_5.png) | ![preview 6](57/preview_6.png) | ![preview 7](57/preview_7.png) | ![preview 8](57/preview_8.png) | | 58 | 234 | [Download](58/dataset.zip) | ![preview 1](58/preview_1.png) | ![preview 2](58/preview_2.png) | ![preview 3](58/preview_3.png) | ![preview 4](58/preview_4.png) | ![preview 5](58/preview_5.png) | ![preview 6](58/preview_6.png) | ![preview 7](58/preview_7.png) | ![preview 8](58/preview_8.png) | | 59 | 29 | [Download](59/dataset.zip) | ![preview 1](59/preview_1.png) | ![preview 2](59/preview_2.png) | ![preview 3](59/preview_3.png) | ![preview 4](59/preview_4.png) | ![preview 5](59/preview_5.png) | ![preview 6](59/preview_6.png) | ![preview 7](59/preview_7.png) | ![preview 8](59/preview_8.png) | | 60 | 106 | [Download](60/dataset.zip) | ![preview 1](60/preview_1.png) | ![preview 2](60/preview_2.png) | ![preview 3](60/preview_3.png) | ![preview 4](60/preview_4.png) | ![preview 5](60/preview_5.png) | ![preview 6](60/preview_6.png) | ![preview 7](60/preview_7.png) | ![preview 8](60/preview_8.png) | | 61 | 247 | [Download](61/dataset.zip) | ![preview 1](61/preview_1.png) | ![preview 2](61/preview_2.png) | ![preview 3](61/preview_3.png) | ![preview 4](61/preview_4.png) | ![preview 5](61/preview_5.png) | ![preview 6](61/preview_6.png) | ![preview 7](61/preview_7.png) | ![preview 8](61/preview_8.png) | | 62 | 37 | [Download](62/dataset.zip) | ![preview 1](62/preview_1.png) | ![preview 2](62/preview_2.png) | ![preview 3](62/preview_3.png) | ![preview 4](62/preview_4.png) | ![preview 5](62/preview_5.png) | ![preview 6](62/preview_6.png) | ![preview 7](62/preview_7.png) | ![preview 8](62/preview_8.png) | | 63 | 66 | [Download](63/dataset.zip) | ![preview 1](63/preview_1.png) | ![preview 2](63/preview_2.png) | ![preview 3](63/preview_3.png) | ![preview 4](63/preview_4.png) | ![preview 5](63/preview_5.png) | ![preview 6](63/preview_6.png) | ![preview 7](63/preview_7.png) | ![preview 8](63/preview_8.png) | | 64 | 43 | [Download](64/dataset.zip) | ![preview 1](64/preview_1.png) | ![preview 2](64/preview_2.png) | ![preview 3](64/preview_3.png) | ![preview 4](64/preview_4.png) | ![preview 5](64/preview_5.png) | ![preview 6](64/preview_6.png) | ![preview 7](64/preview_7.png) | ![preview 8](64/preview_8.png) | | 65 | 34 | [Download](65/dataset.zip) | ![preview 1](65/preview_1.png) | ![preview 2](65/preview_2.png) | ![preview 3](65/preview_3.png) | ![preview 4](65/preview_4.png) | ![preview 5](65/preview_5.png) | ![preview 6](65/preview_6.png) | ![preview 7](65/preview_7.png) | ![preview 8](65/preview_8.png) | | 66 | 36 | [Download](66/dataset.zip) | ![preview 1](66/preview_1.png) | ![preview 2](66/preview_2.png) | ![preview 3](66/preview_3.png) | ![preview 4](66/preview_4.png) | ![preview 5](66/preview_5.png) | ![preview 6](66/preview_6.png) | ![preview 7](66/preview_7.png) | ![preview 8](66/preview_8.png) | | 67 | 36 | [Download](67/dataset.zip) | ![preview 1](67/preview_1.png) | ![preview 2](67/preview_2.png) | ![preview 3](67/preview_3.png) | ![preview 4](67/preview_4.png) | ![preview 5](67/preview_5.png) | ![preview 6](67/preview_6.png) | ![preview 7](67/preview_7.png) | ![preview 8](67/preview_8.png) | | 68 | 38 | [Download](68/dataset.zip) | ![preview 1](68/preview_1.png) | ![preview 2](68/preview_2.png) | ![preview 3](68/preview_3.png) | ![preview 4](68/preview_4.png) | ![preview 5](68/preview_5.png) | ![preview 6](68/preview_6.png) | ![preview 7](68/preview_7.png) | ![preview 8](68/preview_8.png) | | 69 | 12 | [Download](69/dataset.zip) | ![preview 1](69/preview_1.png) | ![preview 2](69/preview_2.png) | ![preview 3](69/preview_3.png) | ![preview 4](69/preview_4.png) | ![preview 5](69/preview_5.png) | ![preview 6](69/preview_6.png) | ![preview 7](69/preview_7.png) | ![preview 8](69/preview_8.png) | | 70 | 65 | [Download](70/dataset.zip) | ![preview 1](70/preview_1.png) | ![preview 2](70/preview_2.png) | ![preview 3](70/preview_3.png) | ![preview 4](70/preview_4.png) | ![preview 5](70/preview_5.png) | ![preview 6](70/preview_6.png) | ![preview 7](70/preview_7.png) | ![preview 8](70/preview_8.png) | | 71 | 81 | [Download](71/dataset.zip) | ![preview 1](71/preview_1.png) | ![preview 2](71/preview_2.png) | ![preview 3](71/preview_3.png) | ![preview 4](71/preview_4.png) | ![preview 5](71/preview_5.png) | ![preview 6](71/preview_6.png) | ![preview 7](71/preview_7.png) | ![preview 8](71/preview_8.png) | | 72 | 33 | [Download](72/dataset.zip) | ![preview 1](72/preview_1.png) | ![preview 2](72/preview_2.png) | ![preview 3](72/preview_3.png) | ![preview 4](72/preview_4.png) | ![preview 5](72/preview_5.png) | ![preview 6](72/preview_6.png) | ![preview 7](72/preview_7.png) | ![preview 8](72/preview_8.png) | | 73 | 16 | [Download](73/dataset.zip) | ![preview 1](73/preview_1.png) | ![preview 2](73/preview_2.png) | ![preview 3](73/preview_3.png) | ![preview 4](73/preview_4.png) | ![preview 5](73/preview_5.png) | ![preview 6](73/preview_6.png) | ![preview 7](73/preview_7.png) | ![preview 8](73/preview_8.png) | | 74 | 315 | [Download](74/dataset.zip) | ![preview 1](74/preview_1.png) | ![preview 2](74/preview_2.png) | ![preview 3](74/preview_3.png) | ![preview 4](74/preview_4.png) | ![preview 5](74/preview_5.png) | ![preview 6](74/preview_6.png) | ![preview 7](74/preview_7.png) | ![preview 8](74/preview_8.png) | | 75 | 15 | [Download](75/dataset.zip) | ![preview 1](75/preview_1.png) | ![preview 2](75/preview_2.png) | ![preview 3](75/preview_3.png) | ![preview 4](75/preview_4.png) | ![preview 5](75/preview_5.png) | ![preview 6](75/preview_6.png) | ![preview 7](75/preview_7.png) | ![preview 8](75/preview_8.png) | | 76 | 56 | [Download](76/dataset.zip) | ![preview 1](76/preview_1.png) | ![preview 2](76/preview_2.png) | ![preview 3](76/preview_3.png) | ![preview 4](76/preview_4.png) | ![preview 5](76/preview_5.png) | ![preview 6](76/preview_6.png) | ![preview 7](76/preview_7.png) | ![preview 8](76/preview_8.png) | | 77 | 50 | [Download](77/dataset.zip) | ![preview 1](77/preview_1.png) | ![preview 2](77/preview_2.png) | ![preview 3](77/preview_3.png) | ![preview 4](77/preview_4.png) | ![preview 5](77/preview_5.png) | ![preview 6](77/preview_6.png) | ![preview 7](77/preview_7.png) | ![preview 8](77/preview_8.png) | | 78 | 60 | [Download](78/dataset.zip) | ![preview 1](78/preview_1.png) | ![preview 2](78/preview_2.png) | ![preview 3](78/preview_3.png) | ![preview 4](78/preview_4.png) | ![preview 5](78/preview_5.png) | ![preview 6](78/preview_6.png) | ![preview 7](78/preview_7.png) | ![preview 8](78/preview_8.png) | | 79 | 48 | [Download](79/dataset.zip) | ![preview 1](79/preview_1.png) | ![preview 2](79/preview_2.png) | ![preview 3](79/preview_3.png) | ![preview 4](79/preview_4.png) | ![preview 5](79/preview_5.png) | ![preview 6](79/preview_6.png) | ![preview 7](79/preview_7.png) | ![preview 8](79/preview_8.png) | | 80 | 115 | [Download](80/dataset.zip) | ![preview 1](80/preview_1.png) | ![preview 2](80/preview_2.png) | ![preview 3](80/preview_3.png) | ![preview 4](80/preview_4.png) | ![preview 5](80/preview_5.png) | ![preview 6](80/preview_6.png) | ![preview 7](80/preview_7.png) | ![preview 8](80/preview_8.png) | | 81 | 15 | [Download](81/dataset.zip) | ![preview 1](81/preview_1.png) | ![preview 2](81/preview_2.png) | ![preview 3](81/preview_3.png) | ![preview 4](81/preview_4.png) | ![preview 5](81/preview_5.png) | ![preview 6](81/preview_6.png) | ![preview 7](81/preview_7.png) | ![preview 8](81/preview_8.png) | | 82 | 163 | [Download](82/dataset.zip) | ![preview 1](82/preview_1.png) | ![preview 2](82/preview_2.png) | ![preview 3](82/preview_3.png) | ![preview 4](82/preview_4.png) | ![preview 5](82/preview_5.png) | ![preview 6](82/preview_6.png) | ![preview 7](82/preview_7.png) | ![preview 8](82/preview_8.png) | | 83 | 36 | [Download](83/dataset.zip) | ![preview 1](83/preview_1.png) | ![preview 2](83/preview_2.png) | ![preview 3](83/preview_3.png) | ![preview 4](83/preview_4.png) | ![preview 5](83/preview_5.png) | ![preview 6](83/preview_6.png) | ![preview 7](83/preview_7.png) | ![preview 8](83/preview_8.png) | | 84 | 237 | [Download](84/dataset.zip) | ![preview 1](84/preview_1.png) | ![preview 2](84/preview_2.png) | ![preview 3](84/preview_3.png) | ![preview 4](84/preview_4.png) | ![preview 5](84/preview_5.png) | ![preview 6](84/preview_6.png) | ![preview 7](84/preview_7.png) | ![preview 8](84/preview_8.png) | | 85 | 20 | [Download](85/dataset.zip) | ![preview 1](85/preview_1.png) | ![preview 2](85/preview_2.png) | ![preview 3](85/preview_3.png) | ![preview 4](85/preview_4.png) | ![preview 5](85/preview_5.png) | ![preview 6](85/preview_6.png) | ![preview 7](85/preview_7.png) | ![preview 8](85/preview_8.png) | | 86 | 1991 | [Download](86/dataset.zip) | ![preview 1](86/preview_1.png) | ![preview 2](86/preview_2.png) | ![preview 3](86/preview_3.png) | ![preview 4](86/preview_4.png) | ![preview 5](86/preview_5.png) | ![preview 6](86/preview_6.png) | ![preview 7](86/preview_7.png) | ![preview 8](86/preview_8.png) | | 87 | 36 | [Download](87/dataset.zip) | ![preview 1](87/preview_1.png) | ![preview 2](87/preview_2.png) | ![preview 3](87/preview_3.png) | ![preview 4](87/preview_4.png) | ![preview 5](87/preview_5.png) | ![preview 6](87/preview_6.png) | ![preview 7](87/preview_7.png) | ![preview 8](87/preview_8.png) | | 88 | 62 | [Download](88/dataset.zip) | ![preview 1](88/preview_1.png) | ![preview 2](88/preview_2.png) | ![preview 3](88/preview_3.png) | ![preview 4](88/preview_4.png) | ![preview 5](88/preview_5.png) | ![preview 6](88/preview_6.png) | ![preview 7](88/preview_7.png) | ![preview 8](88/preview_8.png) | | 89 | 63 | [Download](89/dataset.zip) | ![preview 1](89/preview_1.png) | ![preview 2](89/preview_2.png) | ![preview 3](89/preview_3.png) | ![preview 4](89/preview_4.png) | ![preview 5](89/preview_5.png) | ![preview 6](89/preview_6.png) | ![preview 7](89/preview_7.png) | ![preview 8](89/preview_8.png) | | 90 | 28 | [Download](90/dataset.zip) | ![preview 1](90/preview_1.png) | ![preview 2](90/preview_2.png) | ![preview 3](90/preview_3.png) | ![preview 4](90/preview_4.png) | ![preview 5](90/preview_5.png) | ![preview 6](90/preview_6.png) | ![preview 7](90/preview_7.png) | ![preview 8](90/preview_8.png) | | 91 | 57 | [Download](91/dataset.zip) | ![preview 1](91/preview_1.png) | ![preview 2](91/preview_2.png) | ![preview 3](91/preview_3.png) | ![preview 4](91/preview_4.png) | ![preview 5](91/preview_5.png) | ![preview 6](91/preview_6.png) | ![preview 7](91/preview_7.png) | ![preview 8](91/preview_8.png) | | 92 | 48 | [Download](92/dataset.zip) | ![preview 1](92/preview_1.png) | ![preview 2](92/preview_2.png) | ![preview 3](92/preview_3.png) | ![preview 4](92/preview_4.png) | ![preview 5](92/preview_5.png) | ![preview 6](92/preview_6.png) | ![preview 7](92/preview_7.png) | ![preview 8](92/preview_8.png) | | 93 | 54 | [Download](93/dataset.zip) | ![preview 1](93/preview_1.png) | ![preview 2](93/preview_2.png) | ![preview 3](93/preview_3.png) | ![preview 4](93/preview_4.png) | ![preview 5](93/preview_5.png) | ![preview 6](93/preview_6.png) | ![preview 7](93/preview_7.png) | ![preview 8](93/preview_8.png) | | 94 | 17 | [Download](94/dataset.zip) | ![preview 1](94/preview_1.png) | ![preview 2](94/preview_2.png) | ![preview 3](94/preview_3.png) | ![preview 4](94/preview_4.png) | ![preview 5](94/preview_5.png) | ![preview 6](94/preview_6.png) | ![preview 7](94/preview_7.png) | ![preview 8](94/preview_8.png) | | 95 | 60 | [Download](95/dataset.zip) | ![preview 1](95/preview_1.png) | ![preview 2](95/preview_2.png) | ![preview 3](95/preview_3.png) | ![preview 4](95/preview_4.png) | ![preview 5](95/preview_5.png) | ![preview 6](95/preview_6.png) | ![preview 7](95/preview_7.png) | ![preview 8](95/preview_8.png) | | 96 | 69 | [Download](96/dataset.zip) | ![preview 1](96/preview_1.png) | ![preview 2](96/preview_2.png) | ![preview 3](96/preview_3.png) | ![preview 4](96/preview_4.png) | ![preview 5](96/preview_5.png) | ![preview 6](96/preview_6.png) | ![preview 7](96/preview_7.png) | ![preview 8](96/preview_8.png) | | 97 | 36 | [Download](97/dataset.zip) | ![preview 1](97/preview_1.png) | ![preview 2](97/preview_2.png) | ![preview 3](97/preview_3.png) | ![preview 4](97/preview_4.png) | ![preview 5](97/preview_5.png) | ![preview 6](97/preview_6.png) | ![preview 7](97/preview_7.png) | ![preview 8](97/preview_8.png) | | 98 | 33 | [Download](98/dataset.zip) | ![preview 1](98/preview_1.png) | ![preview 2](98/preview_2.png) | ![preview 3](98/preview_3.png) | ![preview 4](98/preview_4.png) | ![preview 5](98/preview_5.png) | ![preview 6](98/preview_6.png) | ![preview 7](98/preview_7.png) | ![preview 8](98/preview_8.png) | | 99 | 67 | [Download](99/dataset.zip) | ![preview 1](99/preview_1.png) | ![preview 2](99/preview_2.png) | ![preview 3](99/preview_3.png) | ![preview 4](99/preview_4.png) | ![preview 5](99/preview_5.png) | ![preview 6](99/preview_6.png) | ![preview 7](99/preview_7.png) | ![preview 8](99/preview_8.png) | | 100 | 128 | [Download](100/dataset.zip) | ![preview 1](100/preview_1.png) | ![preview 2](100/preview_2.png) | ![preview 3](100/preview_3.png) | ![preview 4](100/preview_4.png) | ![preview 5](100/preview_5.png) | ![preview 6](100/preview_6.png) | ![preview 7](100/preview_7.png) | ![preview 8](100/preview_8.png) | | 101 | 34 | [Download](101/dataset.zip) | ![preview 1](101/preview_1.png) | ![preview 2](101/preview_2.png) | ![preview 3](101/preview_3.png) | ![preview 4](101/preview_4.png) | ![preview 5](101/preview_5.png) | ![preview 6](101/preview_6.png) | ![preview 7](101/preview_7.png) | ![preview 8](101/preview_8.png) | | 102 | 11 | [Download](102/dataset.zip) | ![preview 1](102/preview_1.png) | ![preview 2](102/preview_2.png) | ![preview 3](102/preview_3.png) | ![preview 4](102/preview_4.png) | ![preview 5](102/preview_5.png) | ![preview 6](102/preview_6.png) | ![preview 7](102/preview_7.png) | ![preview 8](102/preview_8.png) | | 103 | 114 | [Download](103/dataset.zip) | ![preview 1](103/preview_1.png) | ![preview 2](103/preview_2.png) | ![preview 3](103/preview_3.png) | ![preview 4](103/preview_4.png) | ![preview 5](103/preview_5.png) | ![preview 6](103/preview_6.png) | ![preview 7](103/preview_7.png) | ![preview 8](103/preview_8.png) | | 104 | 63 | [Download](104/dataset.zip) | ![preview 1](104/preview_1.png) | ![preview 2](104/preview_2.png) | ![preview 3](104/preview_3.png) | ![preview 4](104/preview_4.png) | ![preview 5](104/preview_5.png) | ![preview 6](104/preview_6.png) | ![preview 7](104/preview_7.png) | ![preview 8](104/preview_8.png) | | 105 | 22 | [Download](105/dataset.zip) | ![preview 1](105/preview_1.png) | ![preview 2](105/preview_2.png) | ![preview 3](105/preview_3.png) | ![preview 4](105/preview_4.png) | ![preview 5](105/preview_5.png) | ![preview 6](105/preview_6.png) | ![preview 7](105/preview_7.png) | ![preview 8](105/preview_8.png) | | 106 | 15 | [Download](106/dataset.zip) | ![preview 1](106/preview_1.png) | ![preview 2](106/preview_2.png) | ![preview 3](106/preview_3.png) | ![preview 4](106/preview_4.png) | ![preview 5](106/preview_5.png) | ![preview 6](106/preview_6.png) | ![preview 7](106/preview_7.png) | ![preview 8](106/preview_8.png) | | 107 | 53 | [Download](107/dataset.zip) | ![preview 1](107/preview_1.png) | ![preview 2](107/preview_2.png) | ![preview 3](107/preview_3.png) | ![preview 4](107/preview_4.png) | ![preview 5](107/preview_5.png) | ![preview 6](107/preview_6.png) | ![preview 7](107/preview_7.png) | ![preview 8](107/preview_8.png) | | 108 | 88 | [Download](108/dataset.zip) | ![preview 1](108/preview_1.png) | ![preview 2](108/preview_2.png) | ![preview 3](108/preview_3.png) | ![preview 4](108/preview_4.png) | ![preview 5](108/preview_5.png) | ![preview 6](108/preview_6.png) | ![preview 7](108/preview_7.png) | ![preview 8](108/preview_8.png) | | 109 | 26 | [Download](109/dataset.zip) | ![preview 1](109/preview_1.png) | ![preview 2](109/preview_2.png) | ![preview 3](109/preview_3.png) | ![preview 4](109/preview_4.png) | ![preview 5](109/preview_5.png) | ![preview 6](109/preview_6.png) | ![preview 7](109/preview_7.png) | ![preview 8](109/preview_8.png) | | 110 | 26 | [Download](110/dataset.zip) | ![preview 1](110/preview_1.png) | ![preview 2](110/preview_2.png) | ![preview 3](110/preview_3.png) | ![preview 4](110/preview_4.png) | ![preview 5](110/preview_5.png) | ![preview 6](110/preview_6.png) | ![preview 7](110/preview_7.png) | ![preview 8](110/preview_8.png) | | 111 | 50 | [Download](111/dataset.zip) | ![preview 1](111/preview_1.png) | ![preview 2](111/preview_2.png) | ![preview 3](111/preview_3.png) | ![preview 4](111/preview_4.png) | ![preview 5](111/preview_5.png) | ![preview 6](111/preview_6.png) | ![preview 7](111/preview_7.png) | ![preview 8](111/preview_8.png) | | 112 | 26 | [Download](112/dataset.zip) | ![preview 1](112/preview_1.png) | ![preview 2](112/preview_2.png) | ![preview 3](112/preview_3.png) | ![preview 4](112/preview_4.png) | ![preview 5](112/preview_5.png) | ![preview 6](112/preview_6.png) | ![preview 7](112/preview_7.png) | ![preview 8](112/preview_8.png) | | 113 | 99 | [Download](113/dataset.zip) | ![preview 1](113/preview_1.png) | ![preview 2](113/preview_2.png) | ![preview 3](113/preview_3.png) | ![preview 4](113/preview_4.png) | ![preview 5](113/preview_5.png) | ![preview 6](113/preview_6.png) | ![preview 7](113/preview_7.png) | ![preview 8](113/preview_8.png) | | 114 | 29 | [Download](114/dataset.zip) | ![preview 1](114/preview_1.png) | ![preview 2](114/preview_2.png) | ![preview 3](114/preview_3.png) | ![preview 4](114/preview_4.png) | ![preview 5](114/preview_5.png) | ![preview 6](114/preview_6.png) | ![preview 7](114/preview_7.png) | ![preview 8](114/preview_8.png) | | 115 | 67 | [Download](115/dataset.zip) | ![preview 1](115/preview_1.png) | ![preview 2](115/preview_2.png) | ![preview 3](115/preview_3.png) | ![preview 4](115/preview_4.png) | ![preview 5](115/preview_5.png) | ![preview 6](115/preview_6.png) | ![preview 7](115/preview_7.png) | ![preview 8](115/preview_8.png) | | 116 | 18 | [Download](116/dataset.zip) | ![preview 1](116/preview_1.png) | ![preview 2](116/preview_2.png) | ![preview 3](116/preview_3.png) | ![preview 4](116/preview_4.png) | ![preview 5](116/preview_5.png) | ![preview 6](116/preview_6.png) | ![preview 7](116/preview_7.png) | ![preview 8](116/preview_8.png) | | 117 | 8 | [Download](117/dataset.zip) | ![preview 1](117/preview_1.png) | ![preview 2](117/preview_2.png) | ![preview 3](117/preview_3.png) | ![preview 4](117/preview_4.png) | ![preview 5](117/preview_5.png) | ![preview 6](117/preview_6.png) | ![preview 7](117/preview_7.png) | ![preview 8](117/preview_8.png) | | 118 | 34 | [Download](118/dataset.zip) | ![preview 1](118/preview_1.png) | ![preview 2](118/preview_2.png) | ![preview 3](118/preview_3.png) | ![preview 4](118/preview_4.png) | ![preview 5](118/preview_5.png) | ![preview 6](118/preview_6.png) | ![preview 7](118/preview_7.png) | ![preview 8](118/preview_8.png) | | 119 | 21 | [Download](119/dataset.zip) | ![preview 1](119/preview_1.png) | ![preview 2](119/preview_2.png) | ![preview 3](119/preview_3.png) | ![preview 4](119/preview_4.png) | ![preview 5](119/preview_5.png) | ![preview 6](119/preview_6.png) | ![preview 7](119/preview_7.png) | ![preview 8](119/preview_8.png) | | 120 | 15 | [Download](120/dataset.zip) | ![preview 1](120/preview_1.png) | ![preview 2](120/preview_2.png) | ![preview 3](120/preview_3.png) | ![preview 4](120/preview_4.png) | ![preview 5](120/preview_5.png) | ![preview 6](120/preview_6.png) | ![preview 7](120/preview_7.png) | ![preview 8](120/preview_8.png) | | 121 | 22 | [Download](121/dataset.zip) | ![preview 1](121/preview_1.png) | ![preview 2](121/preview_2.png) | ![preview 3](121/preview_3.png) | ![preview 4](121/preview_4.png) | ![preview 5](121/preview_5.png) | ![preview 6](121/preview_6.png) | ![preview 7](121/preview_7.png) | ![preview 8](121/preview_8.png) | | 122 | 26 | [Download](122/dataset.zip) | ![preview 1](122/preview_1.png) | ![preview 2](122/preview_2.png) | ![preview 3](122/preview_3.png) | ![preview 4](122/preview_4.png) | ![preview 5](122/preview_5.png) | ![preview 6](122/preview_6.png) | ![preview 7](122/preview_7.png) | ![preview 8](122/preview_8.png) | | 123 | 32 | [Download](123/dataset.zip) | ![preview 1](123/preview_1.png) | ![preview 2](123/preview_2.png) | ![preview 3](123/preview_3.png) | ![preview 4](123/preview_4.png) | ![preview 5](123/preview_5.png) | ![preview 6](123/preview_6.png) | ![preview 7](123/preview_7.png) | ![preview 8](123/preview_8.png) | | 124 | 16 | [Download](124/dataset.zip) | ![preview 1](124/preview_1.png) | ![preview 2](124/preview_2.png) | ![preview 3](124/preview_3.png) | ![preview 4](124/preview_4.png) | ![preview 5](124/preview_5.png) | ![preview 6](124/preview_6.png) | ![preview 7](124/preview_7.png) | ![preview 8](124/preview_8.png) | | 125 | 22 | [Download](125/dataset.zip) | ![preview 1](125/preview_1.png) | ![preview 2](125/preview_2.png) | ![preview 3](125/preview_3.png) | ![preview 4](125/preview_4.png) | ![preview 5](125/preview_5.png) | ![preview 6](125/preview_6.png) | ![preview 7](125/preview_7.png) | ![preview 8](125/preview_8.png) | | 126 | 45 | [Download](126/dataset.zip) | ![preview 1](126/preview_1.png) | ![preview 2](126/preview_2.png) | ![preview 3](126/preview_3.png) | ![preview 4](126/preview_4.png) | ![preview 5](126/preview_5.png) | ![preview 6](126/preview_6.png) | ![preview 7](126/preview_7.png) | ![preview 8](126/preview_8.png) | | 127 | 12 | [Download](127/dataset.zip) | ![preview 1](127/preview_1.png) | ![preview 2](127/preview_2.png) | ![preview 3](127/preview_3.png) | ![preview 4](127/preview_4.png) | ![preview 5](127/preview_5.png) | ![preview 6](127/preview_6.png) | ![preview 7](127/preview_7.png) | ![preview 8](127/preview_8.png) | | 128 | 40 | [Download](128/dataset.zip) | ![preview 1](128/preview_1.png) | ![preview 2](128/preview_2.png) | ![preview 3](128/preview_3.png) | ![preview 4](128/preview_4.png) | ![preview 5](128/preview_5.png) | ![preview 6](128/preview_6.png) | ![preview 7](128/preview_7.png) | ![preview 8](128/preview_8.png) | | 129 | 28 | [Download](129/dataset.zip) | ![preview 1](129/preview_1.png) | ![preview 2](129/preview_2.png) | ![preview 3](129/preview_3.png) | ![preview 4](129/preview_4.png) | ![preview 5](129/preview_5.png) | ![preview 6](129/preview_6.png) | ![preview 7](129/preview_7.png) | ![preview 8](129/preview_8.png) | | 130 | 55 | [Download](130/dataset.zip) | ![preview 1](130/preview_1.png) | ![preview 2](130/preview_2.png) | ![preview 3](130/preview_3.png) | ![preview 4](130/preview_4.png) | ![preview 5](130/preview_5.png) | ![preview 6](130/preview_6.png) | ![preview 7](130/preview_7.png) | ![preview 8](130/preview_8.png) | | 131 | 22 | [Download](131/dataset.zip) | ![preview 1](131/preview_1.png) | ![preview 2](131/preview_2.png) | ![preview 3](131/preview_3.png) | ![preview 4](131/preview_4.png) | ![preview 5](131/preview_5.png) | ![preview 6](131/preview_6.png) | ![preview 7](131/preview_7.png) | ![preview 8](131/preview_8.png) | | 132 | 53 | [Download](132/dataset.zip) | ![preview 1](132/preview_1.png) | ![preview 2](132/preview_2.png) | ![preview 3](132/preview_3.png) | ![preview 4](132/preview_4.png) | ![preview 5](132/preview_5.png) | ![preview 6](132/preview_6.png) | ![preview 7](132/preview_7.png) | ![preview 8](132/preview_8.png) | | 133 | 30 | [Download](133/dataset.zip) | ![preview 1](133/preview_1.png) | ![preview 2](133/preview_2.png) | ![preview 3](133/preview_3.png) | ![preview 4](133/preview_4.png) | ![preview 5](133/preview_5.png) | ![preview 6](133/preview_6.png) | ![preview 7](133/preview_7.png) | ![preview 8](133/preview_8.png) | | 134 | 18 | [Download](134/dataset.zip) | ![preview 1](134/preview_1.png) | ![preview 2](134/preview_2.png) | ![preview 3](134/preview_3.png) | ![preview 4](134/preview_4.png) | ![preview 5](134/preview_5.png) | ![preview 6](134/preview_6.png) | ![preview 7](134/preview_7.png) | ![preview 8](134/preview_8.png) | | 135 | 35 | [Download](135/dataset.zip) | ![preview 1](135/preview_1.png) | ![preview 2](135/preview_2.png) | ![preview 3](135/preview_3.png) | ![preview 4](135/preview_4.png) | ![preview 5](135/preview_5.png) | ![preview 6](135/preview_6.png) | ![preview 7](135/preview_7.png) | ![preview 8](135/preview_8.png) | | 136 | 31 | [Download](136/dataset.zip) | ![preview 1](136/preview_1.png) | ![preview 2](136/preview_2.png) | ![preview 3](136/preview_3.png) | ![preview 4](136/preview_4.png) | ![preview 5](136/preview_5.png) | ![preview 6](136/preview_6.png) | ![preview 7](136/preview_7.png) | ![preview 8](136/preview_8.png) | | 137 | 60 | [Download](137/dataset.zip) | ![preview 1](137/preview_1.png) | ![preview 2](137/preview_2.png) | ![preview 3](137/preview_3.png) | ![preview 4](137/preview_4.png) | ![preview 5](137/preview_5.png) | ![preview 6](137/preview_6.png) | ![preview 7](137/preview_7.png) | ![preview 8](137/preview_8.png) | | 138 | 52 | [Download](138/dataset.zip) | ![preview 1](138/preview_1.png) | ![preview 2](138/preview_2.png) | ![preview 3](138/preview_3.png) | ![preview 4](138/preview_4.png) | ![preview 5](138/preview_5.png) | ![preview 6](138/preview_6.png) | ![preview 7](138/preview_7.png) | ![preview 8](138/preview_8.png) | | 139 | 16 | [Download](139/dataset.zip) | ![preview 1](139/preview_1.png) | ![preview 2](139/preview_2.png) | ![preview 3](139/preview_3.png) | ![preview 4](139/preview_4.png) | ![preview 5](139/preview_5.png) | ![preview 6](139/preview_6.png) | ![preview 7](139/preview_7.png) | ![preview 8](139/preview_8.png) | | 140 | 17 | [Download](140/dataset.zip) | ![preview 1](140/preview_1.png) | ![preview 2](140/preview_2.png) | ![preview 3](140/preview_3.png) | ![preview 4](140/preview_4.png) | ![preview 5](140/preview_5.png) | ![preview 6](140/preview_6.png) | ![preview 7](140/preview_7.png) | ![preview 8](140/preview_8.png) | | 141 | 41 | [Download](141/dataset.zip) | ![preview 1](141/preview_1.png) | ![preview 2](141/preview_2.png) | ![preview 3](141/preview_3.png) | ![preview 4](141/preview_4.png) | ![preview 5](141/preview_5.png) | ![preview 6](141/preview_6.png) | ![preview 7](141/preview_7.png) | ![preview 8](141/preview_8.png) | | 142 | 49 | [Download](142/dataset.zip) | ![preview 1](142/preview_1.png) | ![preview 2](142/preview_2.png) | ![preview 3](142/preview_3.png) | ![preview 4](142/preview_4.png) | ![preview 5](142/preview_5.png) | ![preview 6](142/preview_6.png) | ![preview 7](142/preview_7.png) | ![preview 8](142/preview_8.png) | | 143 | 37 | [Download](143/dataset.zip) | ![preview 1](143/preview_1.png) | ![preview 2](143/preview_2.png) | ![preview 3](143/preview_3.png) | ![preview 4](143/preview_4.png) | ![preview 5](143/preview_5.png) | ![preview 6](143/preview_6.png) | ![preview 7](143/preview_7.png) | ![preview 8](143/preview_8.png) | | 144 | 14 | [Download](144/dataset.zip) | ![preview 1](144/preview_1.png) | ![preview 2](144/preview_2.png) | ![preview 3](144/preview_3.png) | ![preview 4](144/preview_4.png) | ![preview 5](144/preview_5.png) | ![preview 6](144/preview_6.png) | ![preview 7](144/preview_7.png) | ![preview 8](144/preview_8.png) | | 145 | 26 | [Download](145/dataset.zip) | ![preview 1](145/preview_1.png) | ![preview 2](145/preview_2.png) | ![preview 3](145/preview_3.png) | ![preview 4](145/preview_4.png) | ![preview 5](145/preview_5.png) | ![preview 6](145/preview_6.png) | ![preview 7](145/preview_7.png) | ![preview 8](145/preview_8.png) | | 146 | 31 | [Download](146/dataset.zip) | ![preview 1](146/preview_1.png) | ![preview 2](146/preview_2.png) | ![preview 3](146/preview_3.png) | ![preview 4](146/preview_4.png) | ![preview 5](146/preview_5.png) | ![preview 6](146/preview_6.png) | ![preview 7](146/preview_7.png) | ![preview 8](146/preview_8.png) | | 147 | 32 | [Download](147/dataset.zip) | ![preview 1](147/preview_1.png) | ![preview 2](147/preview_2.png) | ![preview 3](147/preview_3.png) | ![preview 4](147/preview_4.png) | ![preview 5](147/preview_5.png) | ![preview 6](147/preview_6.png) | ![preview 7](147/preview_7.png) | ![preview 8](147/preview_8.png) | | 148 | 21 | [Download](148/dataset.zip) | ![preview 1](148/preview_1.png) | ![preview 2](148/preview_2.png) | ![preview 3](148/preview_3.png) | ![preview 4](148/preview_4.png) | ![preview 5](148/preview_5.png) | ![preview 6](148/preview_6.png) | ![preview 7](148/preview_7.png) | ![preview 8](148/preview_8.png) | | 149 | 28 | [Download](149/dataset.zip) | ![preview 1](149/preview_1.png) | ![preview 2](149/preview_2.png) | ![preview 3](149/preview_3.png) | ![preview 4](149/preview_4.png) | ![preview 5](149/preview_5.png) | ![preview 6](149/preview_6.png) | ![preview 7](149/preview_7.png) | ![preview 8](149/preview_8.png) | | 150 | 15 | [Download](150/dataset.zip) | ![preview 1](150/preview_1.png) | ![preview 2](150/preview_2.png) | ![preview 3](150/preview_3.png) | ![preview 4](150/preview_4.png) | ![preview 5](150/preview_5.png) | ![preview 6](150/preview_6.png) | ![preview 7](150/preview_7.png) | ![preview 8](150/preview_8.png) | | 151 | 21 | [Download](151/dataset.zip) | ![preview 1](151/preview_1.png) | ![preview 2](151/preview_2.png) | ![preview 3](151/preview_3.png) | ![preview 4](151/preview_4.png) | ![preview 5](151/preview_5.png) | ![preview 6](151/preview_6.png) | ![preview 7](151/preview_7.png) | ![preview 8](151/preview_8.png) | | 152 | 33 | [Download](152/dataset.zip) | ![preview 1](152/preview_1.png) | ![preview 2](152/preview_2.png) | ![preview 3](152/preview_3.png) | ![preview 4](152/preview_4.png) | ![preview 5](152/preview_5.png) | ![preview 6](152/preview_6.png) | ![preview 7](152/preview_7.png) | ![preview 8](152/preview_8.png) | | 153 | 26 | [Download](153/dataset.zip) | ![preview 1](153/preview_1.png) | ![preview 2](153/preview_2.png) | ![preview 3](153/preview_3.png) | ![preview 4](153/preview_4.png) | ![preview 5](153/preview_5.png) | ![preview 6](153/preview_6.png) | ![preview 7](153/preview_7.png) | ![preview 8](153/preview_8.png) | | 154 | 17 | [Download](154/dataset.zip) | ![preview 1](154/preview_1.png) | ![preview 2](154/preview_2.png) | ![preview 3](154/preview_3.png) | ![preview 4](154/preview_4.png) | ![preview 5](154/preview_5.png) | ![preview 6](154/preview_6.png) | ![preview 7](154/preview_7.png) | ![preview 8](154/preview_8.png) | | 155 | 14 | [Download](155/dataset.zip) | ![preview 1](155/preview_1.png) | ![preview 2](155/preview_2.png) | ![preview 3](155/preview_3.png) | ![preview 4](155/preview_4.png) | ![preview 5](155/preview_5.png) | ![preview 6](155/preview_6.png) | ![preview 7](155/preview_7.png) | ![preview 8](155/preview_8.png) | | 156 | 27 | [Download](156/dataset.zip) | ![preview 1](156/preview_1.png) | ![preview 2](156/preview_2.png) | ![preview 3](156/preview_3.png) | ![preview 4](156/preview_4.png) | ![preview 5](156/preview_5.png) | ![preview 6](156/preview_6.png) | ![preview 7](156/preview_7.png) | ![preview 8](156/preview_8.png) | | 157 | 15 | [Download](157/dataset.zip) | ![preview 1](157/preview_1.png) | ![preview 2](157/preview_2.png) | ![preview 3](157/preview_3.png) | ![preview 4](157/preview_4.png) | ![preview 5](157/preview_5.png) | ![preview 6](157/preview_6.png) | ![preview 7](157/preview_7.png) | ![preview 8](157/preview_8.png) | | 158 | 12 | [Download](158/dataset.zip) | ![preview 1](158/preview_1.png) | ![preview 2](158/preview_2.png) | ![preview 3](158/preview_3.png) | ![preview 4](158/preview_4.png) | ![preview 5](158/preview_5.png) | ![preview 6](158/preview_6.png) | ![preview 7](158/preview_7.png) | ![preview 8](158/preview_8.png) | | 159 | 21 | [Download](159/dataset.zip) | ![preview 1](159/preview_1.png) | ![preview 2](159/preview_2.png) | ![preview 3](159/preview_3.png) | ![preview 4](159/preview_4.png) | ![preview 5](159/preview_5.png) | ![preview 6](159/preview_6.png) | ![preview 7](159/preview_7.png) | ![preview 8](159/preview_8.png) | | 160 | 31 | [Download](160/dataset.zip) | ![preview 1](160/preview_1.png) | ![preview 2](160/preview_2.png) | ![preview 3](160/preview_3.png) | ![preview 4](160/preview_4.png) | ![preview 5](160/preview_5.png) | ![preview 6](160/preview_6.png) | ![preview 7](160/preview_7.png) | ![preview 8](160/preview_8.png) | | 161 | 21 | [Download](161/dataset.zip) | ![preview 1](161/preview_1.png) | ![preview 2](161/preview_2.png) | ![preview 3](161/preview_3.png) | ![preview 4](161/preview_4.png) | ![preview 5](161/preview_5.png) | ![preview 6](161/preview_6.png) | ![preview 7](161/preview_7.png) | ![preview 8](161/preview_8.png) | | 162 | 11 | [Download](162/dataset.zip) | ![preview 1](162/preview_1.png) | ![preview 2](162/preview_2.png) | ![preview 3](162/preview_3.png) | ![preview 4](162/preview_4.png) | ![preview 5](162/preview_5.png) | ![preview 6](162/preview_6.png) | ![preview 7](162/preview_7.png) | ![preview 8](162/preview_8.png) | | 163 | 13 | [Download](163/dataset.zip) | ![preview 1](163/preview_1.png) | ![preview 2](163/preview_2.png) | ![preview 3](163/preview_3.png) | ![preview 4](163/preview_4.png) | ![preview 5](163/preview_5.png) | ![preview 6](163/preview_6.png) | ![preview 7](163/preview_7.png) | ![preview 8](163/preview_8.png) | | 164 | 32 | [Download](164/dataset.zip) | ![preview 1](164/preview_1.png) | ![preview 2](164/preview_2.png) | ![preview 3](164/preview_3.png) | ![preview 4](164/preview_4.png) | ![preview 5](164/preview_5.png) | ![preview 6](164/preview_6.png) | ![preview 7](164/preview_7.png) | ![preview 8](164/preview_8.png) | | 165 | 8 | [Download](165/dataset.zip) | ![preview 1](165/preview_1.png) | ![preview 2](165/preview_2.png) | ![preview 3](165/preview_3.png) | ![preview 4](165/preview_4.png) | ![preview 5](165/preview_5.png) | ![preview 6](165/preview_6.png) | ![preview 7](165/preview_7.png) | ![preview 8](165/preview_8.png) | | 166 | 16 | [Download](166/dataset.zip) | ![preview 1](166/preview_1.png) | ![preview 2](166/preview_2.png) | ![preview 3](166/preview_3.png) | ![preview 4](166/preview_4.png) | ![preview 5](166/preview_5.png) | ![preview 6](166/preview_6.png) | ![preview 7](166/preview_7.png) | ![preview 8](166/preview_8.png) | | 167 | 16 | [Download](167/dataset.zip) | ![preview 1](167/preview_1.png) | ![preview 2](167/preview_2.png) | ![preview 3](167/preview_3.png) | ![preview 4](167/preview_4.png) | ![preview 5](167/preview_5.png) | ![preview 6](167/preview_6.png) | ![preview 7](167/preview_7.png) | ![preview 8](167/preview_8.png) | | 168 | 19 | [Download](168/dataset.zip) | ![preview 1](168/preview_1.png) | ![preview 2](168/preview_2.png) | ![preview 3](168/preview_3.png) | ![preview 4](168/preview_4.png) | ![preview 5](168/preview_5.png) | ![preview 6](168/preview_6.png) | ![preview 7](168/preview_7.png) | ![preview 8](168/preview_8.png) | | 169 | 22 | [Download](169/dataset.zip) | ![preview 1](169/preview_1.png) | ![preview 2](169/preview_2.png) | ![preview 3](169/preview_3.png) | ![preview 4](169/preview_4.png) | ![preview 5](169/preview_5.png) | ![preview 6](169/preview_6.png) | ![preview 7](169/preview_7.png) | ![preview 8](169/preview_8.png) | | 170 | 8 | [Download](170/dataset.zip) | ![preview 1](170/preview_1.png) | ![preview 2](170/preview_2.png) | ![preview 3](170/preview_3.png) | ![preview 4](170/preview_4.png) | ![preview 5](170/preview_5.png) | ![preview 6](170/preview_6.png) | ![preview 7](170/preview_7.png) | ![preview 8](170/preview_8.png) | | 171 | 21 | [Download](171/dataset.zip) | ![preview 1](171/preview_1.png) | ![preview 2](171/preview_2.png) | ![preview 3](171/preview_3.png) | ![preview 4](171/preview_4.png) | ![preview 5](171/preview_5.png) | ![preview 6](171/preview_6.png) | ![preview 7](171/preview_7.png) | ![preview 8](171/preview_8.png) | | 172 | 9 | [Download](172/dataset.zip) | ![preview 1](172/preview_1.png) | ![preview 2](172/preview_2.png) | ![preview 3](172/preview_3.png) | ![preview 4](172/preview_4.png) | ![preview 5](172/preview_5.png) | ![preview 6](172/preview_6.png) | ![preview 7](172/preview_7.png) | ![preview 8](172/preview_8.png) | | 173 | 14 | [Download](173/dataset.zip) | ![preview 1](173/preview_1.png) | ![preview 2](173/preview_2.png) | ![preview 3](173/preview_3.png) | ![preview 4](173/preview_4.png) | ![preview 5](173/preview_5.png) | ![preview 6](173/preview_6.png) | ![preview 7](173/preview_7.png) | ![preview 8](173/preview_8.png) | | 174 | 8 | [Download](174/dataset.zip) | ![preview 1](174/preview_1.png) | ![preview 2](174/preview_2.png) | ![preview 3](174/preview_3.png) | ![preview 4](174/preview_4.png) | ![preview 5](174/preview_5.png) | ![preview 6](174/preview_6.png) | ![preview 7](174/preview_7.png) | ![preview 8](174/preview_8.png) | | 175 | 24 | [Download](175/dataset.zip) | ![preview 1](175/preview_1.png) | ![preview 2](175/preview_2.png) | ![preview 3](175/preview_3.png) | ![preview 4](175/preview_4.png) | ![preview 5](175/preview_5.png) | ![preview 6](175/preview_6.png) | ![preview 7](175/preview_7.png) | ![preview 8](175/preview_8.png) | | 176 | 43 | [Download](176/dataset.zip) | ![preview 1](176/preview_1.png) | ![preview 2](176/preview_2.png) | ![preview 3](176/preview_3.png) | ![preview 4](176/preview_4.png) | ![preview 5](176/preview_5.png) | ![preview 6](176/preview_6.png) | ![preview 7](176/preview_7.png) | ![preview 8](176/preview_8.png) | | 177 | 27 | [Download](177/dataset.zip) | ![preview 1](177/preview_1.png) | ![preview 2](177/preview_2.png) | ![preview 3](177/preview_3.png) | ![preview 4](177/preview_4.png) | ![preview 5](177/preview_5.png) | ![preview 6](177/preview_6.png) | ![preview 7](177/preview_7.png) | ![preview 8](177/preview_8.png) | | 178 | 11 | [Download](178/dataset.zip) | ![preview 1](178/preview_1.png) | ![preview 2](178/preview_2.png) | ![preview 3](178/preview_3.png) | ![preview 4](178/preview_4.png) | ![preview 5](178/preview_5.png) | ![preview 6](178/preview_6.png) | ![preview 7](178/preview_7.png) | ![preview 8](178/preview_8.png) | | 179 | 18 | [Download](179/dataset.zip) | ![preview 1](179/preview_1.png) | ![preview 2](179/preview_2.png) | ![preview 3](179/preview_3.png) | ![preview 4](179/preview_4.png) | ![preview 5](179/preview_5.png) | ![preview 6](179/preview_6.png) | ![preview 7](179/preview_7.png) | ![preview 8](179/preview_8.png) | | 180 | 26 | [Download](180/dataset.zip) | ![preview 1](180/preview_1.png) | ![preview 2](180/preview_2.png) | ![preview 3](180/preview_3.png) | ![preview 4](180/preview_4.png) | ![preview 5](180/preview_5.png) | ![preview 6](180/preview_6.png) | ![preview 7](180/preview_7.png) | ![preview 8](180/preview_8.png) | | 181 | 26 | [Download](181/dataset.zip) | ![preview 1](181/preview_1.png) | ![preview 2](181/preview_2.png) | ![preview 3](181/preview_3.png) | ![preview 4](181/preview_4.png) | ![preview 5](181/preview_5.png) | ![preview 6](181/preview_6.png) | ![preview 7](181/preview_7.png) | ![preview 8](181/preview_8.png) | | 182 | 33 | [Download](182/dataset.zip) | ![preview 1](182/preview_1.png) | ![preview 2](182/preview_2.png) | ![preview 3](182/preview_3.png) | ![preview 4](182/preview_4.png) | ![preview 5](182/preview_5.png) | ![preview 6](182/preview_6.png) | ![preview 7](182/preview_7.png) | ![preview 8](182/preview_8.png) | | 183 | 8 | [Download](183/dataset.zip) | ![preview 1](183/preview_1.png) | ![preview 2](183/preview_2.png) | ![preview 3](183/preview_3.png) | ![preview 4](183/preview_4.png) | ![preview 5](183/preview_5.png) | ![preview 6](183/preview_6.png) | ![preview 7](183/preview_7.png) | ![preview 8](183/preview_8.png) | | 184 | 17 | [Download](184/dataset.zip) | ![preview 1](184/preview_1.png) | ![preview 2](184/preview_2.png) | ![preview 3](184/preview_3.png) | ![preview 4](184/preview_4.png) | ![preview 5](184/preview_5.png) | ![preview 6](184/preview_6.png) | ![preview 7](184/preview_7.png) | ![preview 8](184/preview_8.png) | | 185 | 12 | [Download](185/dataset.zip) | ![preview 1](185/preview_1.png) | ![preview 2](185/preview_2.png) | ![preview 3](185/preview_3.png) | ![preview 4](185/preview_4.png) | ![preview 5](185/preview_5.png) | ![preview 6](185/preview_6.png) | ![preview 7](185/preview_7.png) | ![preview 8](185/preview_8.png) | | 186 | 10 | [Download](186/dataset.zip) | ![preview 1](186/preview_1.png) | ![preview 2](186/preview_2.png) | ![preview 3](186/preview_3.png) | ![preview 4](186/preview_4.png) | ![preview 5](186/preview_5.png) | ![preview 6](186/preview_6.png) | ![preview 7](186/preview_7.png) | ![preview 8](186/preview_8.png) | | 187 | 17 | [Download](187/dataset.zip) | ![preview 1](187/preview_1.png) | ![preview 2](187/preview_2.png) | ![preview 3](187/preview_3.png) | ![preview 4](187/preview_4.png) | ![preview 5](187/preview_5.png) | ![preview 6](187/preview_6.png) | ![preview 7](187/preview_7.png) | ![preview 8](187/preview_8.png) | | 188 | 11 | [Download](188/dataset.zip) | ![preview 1](188/preview_1.png) | ![preview 2](188/preview_2.png) | ![preview 3](188/preview_3.png) | ![preview 4](188/preview_4.png) | ![preview 5](188/preview_5.png) | ![preview 6](188/preview_6.png) | ![preview 7](188/preview_7.png) | ![preview 8](188/preview_8.png) | | 189 | 5 | [Download](189/dataset.zip) | ![preview 1](189/preview_1.png) | ![preview 2](189/preview_2.png) | ![preview 3](189/preview_3.png) | ![preview 4](189/preview_4.png) | ![preview 5](189/preview_5.png) | N/A | N/A | N/A | | 190 | 24 | [Download](190/dataset.zip) | ![preview 1](190/preview_1.png) | ![preview 2](190/preview_2.png) | ![preview 3](190/preview_3.png) | ![preview 4](190/preview_4.png) | ![preview 5](190/preview_5.png) | ![preview 6](190/preview_6.png) | ![preview 7](190/preview_7.png) | ![preview 8](190/preview_8.png) | | 191 | 23 | [Download](191/dataset.zip) | ![preview 1](191/preview_1.png) | ![preview 2](191/preview_2.png) | ![preview 3](191/preview_3.png) | ![preview 4](191/preview_4.png) | ![preview 5](191/preview_5.png) | ![preview 6](191/preview_6.png) | ![preview 7](191/preview_7.png) | ![preview 8](191/preview_8.png) | | 192 | 9 | [Download](192/dataset.zip) | ![preview 1](192/preview_1.png) | ![preview 2](192/preview_2.png) | ![preview 3](192/preview_3.png) | ![preview 4](192/preview_4.png) | ![preview 5](192/preview_5.png) | ![preview 6](192/preview_6.png) | ![preview 7](192/preview_7.png) | ![preview 8](192/preview_8.png) | | 193 | 14 | [Download](193/dataset.zip) | ![preview 1](193/preview_1.png) | ![preview 2](193/preview_2.png) | ![preview 3](193/preview_3.png) | ![preview 4](193/preview_4.png) | ![preview 5](193/preview_5.png) | ![preview 6](193/preview_6.png) | ![preview 7](193/preview_7.png) | ![preview 8](193/preview_8.png) | | 194 | 17 | [Download](194/dataset.zip) | ![preview 1](194/preview_1.png) | ![preview 2](194/preview_2.png) | ![preview 3](194/preview_3.png) | ![preview 4](194/preview_4.png) | ![preview 5](194/preview_5.png) | ![preview 6](194/preview_6.png) | ![preview 7](194/preview_7.png) | ![preview 8](194/preview_8.png) | | noise | 148 | [Download](-1/dataset.zip) | ![preview 1](-1/preview_1.png) | ![preview 2](-1/preview_2.png) | ![preview 3](-1/preview_3.png) | ![preview 4](-1/preview_4.png) | ![preview 5](-1/preview_5.png) | ![preview 6](-1/preview_6.png) | ![preview 7](-1/preview_7.png) | ![preview 8](-1/preview_8.png) |
BangumiBase/narutoshippuden
[ "size_categories:10K<n<100K", "license:mit", "art", "region:us" ]
2023-10-08T02:05:35+00:00
{"license": "mit", "size_categories": ["10K<n<100K"], "tags": ["art"]}
2023-10-08T14:11:06+00:00
[]
[]
TAGS #size_categories-10K<n<100K #license-mit #art #region-us
Bangumi Image Base of Naruto Shippuden ====================================== This is the image base of bangumi Naruto Shippuden, we detected 196 characters, 36722 images in total. The full dataset is here. Please note that these image bases are not guaranteed to be 100% cleaned, they may be noisy actual. If you intend to manually train models using this dataset, we recommend performing necessary preprocessing on the downloaded dataset to eliminate potential noisy samples (approximately 1% probability). Here is the characters' preview:
[]
[ "TAGS\n#size_categories-10K<n<100K #license-mit #art #region-us \n" ]
[ 25 ]
[ "passage: TAGS\n#size_categories-10K<n<100K #license-mit #art #region-us \n" ]
ce24d437da75115b0e0f3e4586b45fdf790f6c3f
# Dataset Card for "test_DA_tokenized" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
carnival13/test_DA_tokenized
[ "region:us" ]
2023-10-08T02:18:10+00:00
{"dataset_info": {"features": [{"name": "pass_label", "dtype": "int64"}, {"name": "input", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 1197225491, "num_examples": 335850}], "download_size": 266325813, "dataset_size": 1197225491}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-08T02:18:41+00:00
[]
[]
TAGS #region-us
# Dataset Card for "test_DA_tokenized" More Information needed
[ "# Dataset Card for \"test_DA_tokenized\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"test_DA_tokenized\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"test_DA_tokenized\"\n\nMore Information needed" ]
de4c22faf032a15f233f00e5228a05e7c71b4317
# Dataset Card for "small-coco-wm_50_2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
RIW/small-coco-wm_50_2
[ "region:us" ]
2023-10-08T02:30:37+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "caption", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "key", "dtype": "string"}, {"name": "status", "dtype": "string"}, {"name": "error_message", "dtype": "null"}, {"name": "width", "dtype": "int64"}, {"name": "height", "dtype": "int64"}, {"name": "original_width", "dtype": "int64"}, {"name": "original_height", "dtype": "int64"}, {"name": "exif", "dtype": "string"}, {"name": "sha256", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 781729596.182, "num_examples": 8362}, {"name": "validation", "num_bytes": 851865993.632, "num_examples": 8514}], "download_size": 554825307, "dataset_size": 1633595589.8140001}}
2023-10-08T02:32:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "small-coco-wm_50_2" More Information needed
[ "# Dataset Card for \"small-coco-wm_50_2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"small-coco-wm_50_2\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"small-coco-wm_50_2\"\n\nMore Information needed" ]
31cc4dd42cda279f80682b5c6c6139c41c380d38
# Dataset Card for "nguyen-edu-date" Left: 31694 rows - 0.38 % [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
H4438/nguyen-edu-date
[ "region:us" ]
2023-10-08T02:35:44+00:00
{"dataset_info": {"features": [{"name": "title", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "dates", "sequence": "string"}, {"name": "body", "dtype": "string"}, {"name": "est_date", "dtype": "string"}, {"name": "ext_dates", "sequence": "string"}, {"name": "flt_dates", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 319001143, "num_examples": 84496}], "download_size": 106437844, "dataset_size": 319001143}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-08T17:18:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "nguyen-edu-date" Left: 31694 rows - 0.38 % More Information needed
[ "# Dataset Card for \"nguyen-edu-date\"\nLeft: 31694 rows - 0.38 %\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"nguyen-edu-date\"\nLeft: 31694 rows - 0.38 %\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"nguyen-edu-date\"\nLeft: 31694 rows - 0.38 %\n\nMore Information needed" ]
031e3047b44a4f0eb72fd7f83d068908546d4e7c
# Dataset Card for "test_DA_tokenized2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
carnival13/test_DA_tokenized2
[ "region:us" ]
2023-10-08T02:43:06+00:00
{"dataset_info": {"features": [{"name": "pass_label", "dtype": "int64"}, {"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 456736095, "num_examples": 335850}], "download_size": 104506387, "dataset_size": 456736095}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-08T02:43:15+00:00
[]
[]
TAGS #region-us
# Dataset Card for "test_DA_tokenized2" More Information needed
[ "# Dataset Card for \"test_DA_tokenized2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"test_DA_tokenized2\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"test_DA_tokenized2\"\n\nMore Information needed" ]
b1f1f91b8a92610e1a6d9c59f5c7eb2583ec004f
# Dataset Card for "thang-edu-date" Left: 47461 rows - 0.38 % [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
H4438/thang-edu-date
[ "region:us" ]
2023-10-08T02:43:16+00:00
{"dataset_info": {"features": [{"name": "title", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "dates", "sequence": "string"}, {"name": "body", "dtype": "string"}, {"name": "est_date", "dtype": "string"}, {"name": "ext_dates", "sequence": "string"}, {"name": "flt_dates", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 551928907, "num_examples": 126409}], "download_size": 190841081, "dataset_size": 551928907}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-08T17:13:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "thang-edu-date" Left: 47461 rows - 0.38 % More Information needed
[ "# Dataset Card for \"thang-edu-date\"\nLeft: 47461 rows - 0.38 %\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"thang-edu-date\"\nLeft: 47461 rows - 0.38 %\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"thang-edu-date\"\nLeft: 47461 rows - 0.38 %\n\nMore Information needed" ]
fcb8deb6f2d29262fea8e3bcab5f8b42883cc71e
# Dataset Card for "toxigen-train-es" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Intuit-GenSRF/toxigen-train-es
[ "region:us" ]
2023-10-08T02:50:35+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "labels", "sequence": "string"}, {"name": "processed_text", "sequence": "string"}, {"name": "num_tokens", "dtype": "int64"}, {"name": "text_es", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 426023671, "num_examples": 250880}], "download_size": 10528800, "dataset_size": 426023671}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-08T02:50:40+00:00
[]
[]
TAGS #region-us
# Dataset Card for "toxigen-train-es" More Information needed
[ "# Dataset Card for \"toxigen-train-es\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"toxigen-train-es\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"toxigen-train-es\"\n\nMore Information needed" ]
a4ba59b06b2bc2e47e759c1771e7f8a94f6be8e4
# Dataset Card for "TrainDedupedRefDatasetWMetricFinal1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Fraol/TrainDedupedRefDatasetWMetricFinal1
[ "region:us" ]
2023-10-08T03:25:21+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "source", "dtype": "string"}, {"name": "path_name", "dtype": "string"}, {"name": "file_name", "dtype": "string"}, {"name": "ref_type", "dtype": "string"}, {"name": "hash", "dtype": "string"}, {"name": "class_name", "dtype": "string"}, {"name": "method_name", "dtype": "string"}, {"name": "row_number", "dtype": "int64"}, {"name": "cbo", "dtype": "float64"}, {"name": "wmc", "dtype": "float64"}, {"name": "lcom*", "dtype": "float64"}, {"name": "loc", "dtype": "float64"}, {"name": "astc2", "dtype": "string"}, {"name": "source_after", "dtype": "string"}, {"name": "cbo_after", "dtype": "float64"}, {"name": "wmc_after", "dtype": "float64"}, {"name": "lcom*_after", "dtype": "float64"}, {"name": "loc_after", "dtype": "float64"}, {"name": "astc1", "dtype": "string"}, {"name": "issue_name", "dtype": "string"}, {"name": "issue_localize", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 418141332, "num_examples": 15000}, {"name": "test", "num_bytes": 80590478, "num_examples": 3000}], "download_size": 113829036, "dataset_size": 498731810}}
2023-10-08T03:25:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "TrainDedupedRefDatasetWMetricFinal1" More Information needed
[ "# Dataset Card for \"TrainDedupedRefDatasetWMetricFinal1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"TrainDedupedRefDatasetWMetricFinal1\"\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"TrainDedupedRefDatasetWMetricFinal1\"\n\nMore Information needed" ]
37774549eb69d37c9b874e16134fec23980bcc8d
# Dataset Card for "test_model_dataset_20231008_043111" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tr416/test_model_dataset_20231008_043111
[ "region:us" ]
2023-10-08T03:31:11+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 762696.0, "num_examples": 297}, {"name": "test", "num_bytes": 7704.0, "num_examples": 3}], "download_size": 73907, "dataset_size": 770400.0}}
2023-10-08T03:31:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "test_model_dataset_20231008_043111" More Information needed
[ "# Dataset Card for \"test_model_dataset_20231008_043111\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"test_model_dataset_20231008_043111\"\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"test_model_dataset_20231008_043111\"\n\nMore Information needed" ]
9f13656193832fae88f1fbac07981f372f712701
# Dataset Card for "for_align" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
fiveflow/for_align
[ "region:us" ]
2023-10-08T03:37:59+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "chosen", "dtype": "string"}, {"name": "rejected", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 35614538, "num_examples": 17281}, {"name": "test", "num_bytes": 3992474, "num_examples": 1915}], "download_size": 22211168, "dataset_size": 39607012}}
2023-10-08T03:59:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "for_align" More Information needed
[ "# Dataset Card for \"for_align\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"for_align\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"for_align\"\n\nMore Information needed" ]
d5867c529a25902d06e090b036a0fe19dec094d6
# Dataset Card for "TrainDedupedRefDatasetWMetricFinal2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Fraol/TrainDedupedRefDatasetWMetricFinal2
[ "region:us" ]
2023-10-08T03:38:50+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "source", "dtype": "string"}, {"name": "path_name", "dtype": "string"}, {"name": "file_name", "dtype": "string"}, {"name": "ref_type", "dtype": "string"}, {"name": "hash", "dtype": "string"}, {"name": "class_name", "dtype": "string"}, {"name": "method_name", "dtype": "string"}, {"name": "row_number", "dtype": "int64"}, {"name": "cbo", "dtype": "float64"}, {"name": "wmc", "dtype": "float64"}, {"name": "lcom*", "dtype": "float64"}, {"name": "loc", "dtype": "float64"}, {"name": "astc2", "dtype": "string"}, {"name": "source_after", "dtype": "string"}, {"name": "cbo_after", "dtype": "float64"}, {"name": "wmc_after", "dtype": "float64"}, {"name": "lcom*_after", "dtype": "float64"}, {"name": "loc_after", "dtype": "float64"}, {"name": "astc1", "dtype": "string"}, {"name": "issue_name", "dtype": "string"}, {"name": "issue_localize", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 427412313, "num_examples": 15000}, {"name": "test", "num_bytes": 83826934, "num_examples": 3000}], "download_size": 116099161, "dataset_size": 511239247}}
2023-10-08T03:38:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "TrainDedupedRefDatasetWMetricFinal2" More Information needed
[ "# Dataset Card for \"TrainDedupedRefDatasetWMetricFinal2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"TrainDedupedRefDatasetWMetricFinal2\"\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"TrainDedupedRefDatasetWMetricFinal2\"\n\nMore Information needed" ]
40a7de4c40f6535269434c4dc76294d9a75fb365
# Dataset Card for "lima-u" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
TinyPixel/lima-u
[ "region:us" ]
2023-10-08T04:28:31+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1804087, "num_examples": 780}], "download_size": 1044055, "dataset_size": 1804087}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2024-02-04T03:50:15+00:00
[]
[]
TAGS #region-us
# Dataset Card for "lima-u" More Information needed
[ "# Dataset Card for \"lima-u\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"lima-u\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"lima-u\"\n\nMore Information needed" ]
9a1d51a26e6d5875fca920b1485da83c7158e35f
# Dataset Card for "test_ner" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
m-aliabbas1/test_ner
[ "region:us" ]
2023-10-08T04:34:26+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "tokens", "sequence": "string"}, {"name": "ner_tags", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 40184.8938547486, "num_examples": 304}, {"name": "test", "num_bytes": 7138.106145251397, "num_examples": 54}], "download_size": 8540, "dataset_size": 47323.0}}
2023-10-08T04:35:00+00:00
[]
[]
TAGS #region-us
# Dataset Card for "test_ner" More Information needed
[ "# Dataset Card for \"test_ner\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"test_ner\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"test_ner\"\n\nMore Information needed" ]
c8dd9493e0c02f0844e5ea251818ca04af7b1ef8
--- # TableLlama: Towards Open Large Generalist Models for Tables Project Page: [https://osu-nlp-group.github.io/TableLlama/](https://osu-nlp-group.github.io/TableLlama/) Paper: [https://arxiv.org/abs/2311.09206](https://arxiv.org/abs/2311.09206) Model: [https://huggingface.co/osunlp/TableLlama/](https://huggingface.co/osunlp/TableLlama/) Code: [https://osu-nlp-group.github.io/TableLlama/](https://osu-nlp-group.github.io/TableLlama/) ## Introduction We introduce TableLlama, an open-source large generalist model specifically tailored for various table-based tasks. The TableLlama model is trained on TableInstruct Dataset, a meticulously curated instruction tuning dataset for tables. TableLlama is tuned on 2.6 million table-based task data, and can handle up to 8K context! ## Model 🤗 [TableLlama-7B](https://huggingface.co/osunlp/TableLlama/) ## Data The models are trained on the 🤗 [TableInstruct Dataset](https://huggingface.co/datasets/osunlp/TableInstruct), which includes a comprehensive table-based instruction tuning dataset that covers a variety of real-world tables and realistic tasks. We include 14 datasets of 11 tasks in total. Check out the dataset card for more details. ## Training Procedure The models are fine-tuned with the TableInstruct dataset using LongLoRA (7B), fully fine-tuning version as the base model, which replaces the vanilla attention mechanism of the original Llama-2 (7B) with shift short attention. The training takes 9 days on a 48*A100 cluster. Check out our paper for more details. ## Evaluation The models are evaluated on 8 in-domain datasets of 8 tasks and 6 out-of-domain datasets of 4 tasks. ## Usage You can use the models through Huggingface's Transformers library. Check our Github repo for more advanced use: [https://osu-nlp-group.github.io/TableLlama/](https://osu-nlp-group.github.io/TableLlama/) ## Prompt Format ``` Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. ### Instruction: {instruction} ### Input: {input} ### Question: {question} ### Response: ``` ## Citation If you use the models, data, or code from this project, please cite the original paper: ``` @misc{zhang2023tablellama, title={TableLlama: Towards Open Large Generalist Models for Tables}, author={Tianshu Zhang and Xiang Yue and Yifei Li and Huan Sun}, year={2023}, eprint={2311.09206}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
osunlp/TableInstruct
[ "size_categories:1M<n<10M", "language:en", "license:cc-by-4.0", "arxiv:2311.09206", "region:us" ]
2023-10-08T04:56:59+00:00
{"language": ["en"], "license": "cc-by-4.0", "size_categories": ["1M<n<10M"]}
2023-12-07T23:53:00+00:00
[ "2311.09206" ]
[ "en" ]
TAGS #size_categories-1M<n<10M #language-English #license-cc-by-4.0 #arxiv-2311.09206 #region-us
--- # TableLlama: Towards Open Large Generalist Models for Tables Project Page: URL Paper: URL Model: URL Code: URL ## Introduction We introduce TableLlama, an open-source large generalist model specifically tailored for various table-based tasks. The TableLlama model is trained on TableInstruct Dataset, a meticulously curated instruction tuning dataset for tables. TableLlama is tuned on 2.6 million table-based task data, and can handle up to 8K context! ## Model TableLlama-7B ## Data The models are trained on the TableInstruct Dataset, which includes a comprehensive table-based instruction tuning dataset that covers a variety of real-world tables and realistic tasks. We include 14 datasets of 11 tasks in total. Check out the dataset card for more details. ## Training Procedure The models are fine-tuned with the TableInstruct dataset using LongLoRA (7B), fully fine-tuning version as the base model, which replaces the vanilla attention mechanism of the original Llama-2 (7B) with shift short attention. The training takes 9 days on a 48*A100 cluster. Check out our paper for more details. ## Evaluation The models are evaluated on 8 in-domain datasets of 8 tasks and 6 out-of-domain datasets of 4 tasks. ## Usage You can use the models through Huggingface's Transformers library. Check our Github repo for more advanced use: URL ## Prompt Format If you use the models, data, or code from this project, please cite the original paper:
[ "# TableLlama: Towards Open Large Generalist Models for Tables\n\nProject Page: URL\n\nPaper: URL\n\nModel: URL\n\nCode: URL", "## Introduction\nWe introduce TableLlama, an open-source large generalist model specifically tailored for various table-based tasks. The TableLlama model is trained on TableInstruct Dataset, a meticulously curated instruction tuning dataset for tables. TableLlama is tuned on 2.6 million table-based task data, and can handle up to 8K context!", "## Model \n\n TableLlama-7B", "## Data\nThe models are trained on the TableInstruct Dataset, which includes a comprehensive table-based instruction tuning dataset that covers a variety of real-world tables and realistic tasks. We include 14 datasets of 11 tasks in total. Check out the dataset card for more details.", "## Training Procedure\nThe models are fine-tuned with the TableInstruct dataset using LongLoRA (7B), fully fine-tuning version as the base model, which replaces the vanilla attention mechanism of the original Llama-2 (7B) with shift short attention. The training takes 9 days on a 48*A100 cluster. Check out our paper for more details.", "## Evaluation\nThe models are evaluated on 8 in-domain datasets of 8 tasks and 6 out-of-domain datasets of 4 tasks.", "## Usage\nYou can use the models through Huggingface's Transformers library. \nCheck our Github repo for more advanced use: URL", "## Prompt Format\n\n\nIf you use the models, data, or code from this project, please cite the original paper:" ]
[ "TAGS\n#size_categories-1M<n<10M #language-English #license-cc-by-4.0 #arxiv-2311.09206 #region-us \n", "# TableLlama: Towards Open Large Generalist Models for Tables\n\nProject Page: URL\n\nPaper: URL\n\nModel: URL\n\nCode: URL", "## Introduction\nWe introduce TableLlama, an open-source large generalist model specifically tailored for various table-based tasks. The TableLlama model is trained on TableInstruct Dataset, a meticulously curated instruction tuning dataset for tables. TableLlama is tuned on 2.6 million table-based task data, and can handle up to 8K context!", "## Model \n\n TableLlama-7B", "## Data\nThe models are trained on the TableInstruct Dataset, which includes a comprehensive table-based instruction tuning dataset that covers a variety of real-world tables and realistic tasks. We include 14 datasets of 11 tasks in total. Check out the dataset card for more details.", "## Training Procedure\nThe models are fine-tuned with the TableInstruct dataset using LongLoRA (7B), fully fine-tuning version as the base model, which replaces the vanilla attention mechanism of the original Llama-2 (7B) with shift short attention. The training takes 9 days on a 48*A100 cluster. Check out our paper for more details.", "## Evaluation\nThe models are evaluated on 8 in-domain datasets of 8 tasks and 6 out-of-domain datasets of 4 tasks.", "## Usage\nYou can use the models through Huggingface's Transformers library. \nCheck our Github repo for more advanced use: URL", "## Prompt Format\n\n\nIf you use the models, data, or code from this project, please cite the original paper:" ]
[ 40, 30, 84, 7, 68, 79, 37, 32, 25 ]
[ "passage: TAGS\n#size_categories-1M<n<10M #language-English #license-cc-by-4.0 #arxiv-2311.09206 #region-us \n# TableLlama: Towards Open Large Generalist Models for Tables\n\nProject Page: URL\n\nPaper: URL\n\nModel: URL\n\nCode: URL## Introduction\nWe introduce TableLlama, an open-source large generalist model specifically tailored for various table-based tasks. The TableLlama model is trained on TableInstruct Dataset, a meticulously curated instruction tuning dataset for tables. TableLlama is tuned on 2.6 million table-based task data, and can handle up to 8K context!## Model \n\n TableLlama-7B## Data\nThe models are trained on the TableInstruct Dataset, which includes a comprehensive table-based instruction tuning dataset that covers a variety of real-world tables and realistic tasks. We include 14 datasets of 11 tasks in total. Check out the dataset card for more details.## Training Procedure\nThe models are fine-tuned with the TableInstruct dataset using LongLoRA (7B), fully fine-tuning version as the base model, which replaces the vanilla attention mechanism of the original Llama-2 (7B) with shift short attention. The training takes 9 days on a 48*A100 cluster. Check out our paper for more details.## Evaluation\nThe models are evaluated on 8 in-domain datasets of 8 tasks and 6 out-of-domain datasets of 4 tasks.## Usage\nYou can use the models through Huggingface's Transformers library. \nCheck our Github repo for more advanced use: URL## Prompt Format\n\n\nIf you use the models, data, or code from this project, please cite the original paper:" ]
bbb3aa9dfda8e567da21356658e14aa5447486c3
# Dataset Card for "gpt_target_group_v1-2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
datazeit/gpt_target_group_v1-2
[ "region:us" ]
2023-10-08T05:06:58+00:00
{"dataset_info": {"features": [{"name": "title", "dtype": "string"}, {"name": "category", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "result", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2879849, "num_examples": 1984}], "download_size": 1125328, "dataset_size": 2879849}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-08T06:03:50+00:00
[]
[]
TAGS #region-us
# Dataset Card for "gpt_target_group_v1-2" More Information needed
[ "# Dataset Card for \"gpt_target_group_v1-2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"gpt_target_group_v1-2\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"gpt_target_group_v1-2\"\n\nMore Information needed" ]
e8896481c8aa8ade59ef2a76683104575e7fa3bb
# Dataset Card for "merged_data1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tanvirsrbd1/merged_data1
[ "region:us" ]
2023-10-08T05:08:08+00:00
{"dataset_info": {"features": [{"name": "html", "dtype": "string"}, {"name": "response", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3310511, "num_examples": 5960}], "download_size": 1072441, "dataset_size": 3310511}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-08T05:08:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "merged_data1" More Information needed
[ "# Dataset Card for \"merged_data1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"merged_data1\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"merged_data1\"\n\nMore Information needed" ]
52f74872698024f8359a5a8d60fb7001cfbaa38c
# Dataset Card for "photogram_prompts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Falah/photogram_prompts
[ "region:us" ]
2023-10-08T05:21:32+00:00
{"dataset_info": {"features": [{"name": "prompts", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1790632, "num_examples": 10000}], "download_size": 248890, "dataset_size": 1790632}}
2023-10-08T05:39:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "photogram_prompts" More Information needed
[ "# Dataset Card for \"photogram_prompts\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"photogram_prompts\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"photogram_prompts\"\n\nMore Information needed" ]
22f533842bdeed55ace6d74dba55ff53ae203f8d
# Dataset Card for "whats-in-a-name_v0.1_embeds_clip-b32" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dmarx/whats-in-a-name_v0.1_embeds_clip-b32
[ "region:us" ]
2023-10-08T05:23:37+00:00
{"dataset_info": {"features": [{"name": "class_idx", "dtype": "int64"}, {"name": "name", "dtype": "string"}, {"name": "root", "dtype": "string"}, {"name": "image_id", "dtype": "string"}, {"name": "embed_type", "dtype": "string"}, {"name": "path", "dtype": "string"}, {"name": "embed", "sequence": "float32"}, {"name": "embed_normed", "sequence": "float32"}, {"name": "similarity@6", "dtype": "float64"}, {"name": "DIV@6", "dtype": "float64"}, {"name": "similarity@12", "dtype": "float64"}, {"name": "DIV@12", "dtype": "float64"}, {"name": "similarity@18", "dtype": "float64"}, {"name": "DIV@18", "dtype": "float64"}, {"name": "similarity@24", "dtype": "float64"}, {"name": "DIV@24", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 149815296, "num_examples": 34200}], "download_size": 72810192, "dataset_size": 149815296}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-08T05:31:14+00:00
[]
[]
TAGS #region-us
# Dataset Card for "whats-in-a-name_v0.1_embeds_clip-b32" More Information needed
[ "# Dataset Card for \"whats-in-a-name_v0.1_embeds_clip-b32\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"whats-in-a-name_v0.1_embeds_clip-b32\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"whats-in-a-name_v0.1_embeds_clip-b32\"\n\nMore Information needed" ]
abb68e86a267ffc3272dc2b9b98dbbbef182d44f
# Dataset Card for "aozora" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ohtaman/aozora
[ "region:us" ]
2023-10-08T05:25:53+00:00
{"dataset_info": {"features": [{"name": "title", "dtype": "string"}, {"name": "author", "dtype": "string"}, {"name": "content", "dtype": "string"}, {"name": "filename", "dtype": "string"}, {"name": "category", "dtype": "string"}, {"name": "short_description", "dtype": "string"}, {"name": "char_kana_type", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 704528623.1545657, "num_examples": 17006}, {"name": "test", "num_bytes": 4142823.8454343504, "num_examples": 100}], "download_size": 393522386, "dataset_size": 708671447.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]}
2023-10-21T04:13:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "aozora" More Information needed
[ "# Dataset Card for \"aozora\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"aozora\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"aozora\"\n\nMore Information needed" ]
91f70390a95d581211fe96fd6e86baed8baabf37
# Dataset Card for "sur_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
carnival13/sur_test
[ "region:us" ]
2023-10-08T05:26:32+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 1297540140, "num_examples": 900000}], "download_size": 298907283, "dataset_size": 1297540140}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-08T05:27:14+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sur_test" More Information needed
[ "# Dataset Card for \"sur_test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sur_test\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"sur_test\"\n\nMore Information needed" ]
539c849aa53f27d5809a72b7ba20d2f6deb7cd9f
# Dataset Card for "fr_sexism_labelled" Based on the Kaggle dataset [Sexist Workplace Statements](https://www.kaggle.com/datasets/dgrosz/sexist-workplace-statements). This dataset features more than 1100 examples of statements of workplace sexism, roughly balanced between examples of certain sexism and ambiguous or neutral cases (labeled with a “1” and “0” respectively). The original English dataset has been translated into French via machine translation with the [Helsinki-NLP/opus-mt-en-fr](https://huggingface.co/Helsinki-NLP/opus-mt-en-fr) model.
lidiapierre/fr_sexism_labelled
[ "region:us" ]
2023-10-08T05:34:04+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "Sentences", "dtype": "string"}, {"name": "Label", "dtype": "int64"}, {"name": "fr_sentences", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 192216, "num_examples": 1137}], "download_size": 119626, "dataset_size": 192216}}
2023-10-08T05:42:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "fr_sexism_labelled" Based on the Kaggle dataset Sexist Workplace Statements. This dataset features more than 1100 examples of statements of workplace sexism, roughly balanced between examples of certain sexism and ambiguous or neutral cases (labeled with a “1” and “0” respectively). The original English dataset has been translated into French via machine translation with the Helsinki-NLP/opus-mt-en-fr model.
[ "# Dataset Card for \"fr_sexism_labelled\"\n\nBased on the Kaggle dataset Sexist Workplace Statements.\n\nThis dataset features more than 1100 examples of statements of workplace sexism, roughly balanced between examples of certain sexism and ambiguous or neutral cases (labeled with a “1” and “0” respectively).\n\nThe original English dataset has been translated into French via machine translation with the Helsinki-NLP/opus-mt-en-fr model." ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"fr_sexism_labelled\"\n\nBased on the Kaggle dataset Sexist Workplace Statements.\n\nThis dataset features more than 1100 examples of statements of workplace sexism, roughly balanced between examples of certain sexism and ambiguous or neutral cases (labeled with a “1” and “0” respectively).\n\nThe original English dataset has been translated into French via machine translation with the Helsinki-NLP/opus-mt-en-fr model." ]
[ 6, 113 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"fr_sexism_labelled\"\n\nBased on the Kaggle dataset Sexist Workplace Statements.\n\nThis dataset features more than 1100 examples of statements of workplace sexism, roughly balanced between examples of certain sexism and ambiguous or neutral cases (labeled with a “1” and “0” respectively).\n\nThe original English dataset has been translated into French via machine translation with the Helsinki-NLP/opus-mt-en-fr model." ]
96385db78aa5c46fe8f1d63c6088bbd82bde5960
# Dataset Card for "DONOTUSEDATA-XL-SideA" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
BirdL/DONOTUSEDATA-XL-SideA
[ "not-for-all-audiences", "region:us" ]
2023-10-08T05:42:23+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "sexual", "dtype": "float64"}, {"name": "hate", "dtype": "float64"}, {"name": "violence", "dtype": "float64"}, {"name": "self-harm", "dtype": "float64"}, {"name": "sexual/minors", "dtype": "float64"}, {"name": "hate/threatening", "dtype": "float64"}, {"name": "violence/graphic", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 56475483, "num_examples": 200001}], "download_size": 44262376, "dataset_size": 56475483}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "tags": ["not-for-all-audiences"]}
2023-11-13T03:07:11+00:00
[]
[]
TAGS #not-for-all-audiences #region-us
# Dataset Card for "DONOTUSEDATA-XL-SideA" More Information needed
[ "# Dataset Card for \"DONOTUSEDATA-XL-SideA\"\n\nMore Information needed" ]
[ "TAGS\n#not-for-all-audiences #region-us \n", "# Dataset Card for \"DONOTUSEDATA-XL-SideA\"\n\nMore Information needed" ]
[ 15, 21 ]
[ "passage: TAGS\n#not-for-all-audiences #region-us \n# Dataset Card for \"DONOTUSEDATA-XL-SideA\"\n\nMore Information needed" ]
749747465ee7d08dda4fe0656e4b8030183561a1
# Dataset Card for "med_images" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
SUSHMITH/med_images
[ "region:us" ]
2023-10-08T05:53:00+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "aspirin", "1": "benedryl", "2": "dolo", "3": "paracetmol", "4": "zincovit"}}}}], "splits": [{"name": "train", "num_bytes": 291504.35, "num_examples": 17}, {"name": "test", "num_bytes": 35188.65, "num_examples": 3}], "download_size": 329516, "dataset_size": 326693.0}}
2023-10-08T05:53:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "med_images" More Information needed
[ "# Dataset Card for \"med_images\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"med_images\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"med_images\"\n\nMore Information needed" ]
8e5657fdbfc3401909e038a979c0531a3429cfe7
# Dataset Card for "storyteller" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sajjadamjad/storyteller
[ "region:us" ]
2023-10-08T06:02:56+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}], "splits": [{"name": "train", "num_bytes": 2196528.0, "num_examples": 268}, {"name": "test", "num_bytes": 245880.0, "num_examples": 30}], "download_size": 1128492, "dataset_size": 2442408.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]}
2023-10-16T14:06:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "storyteller" More Information needed
[ "# Dataset Card for \"storyteller\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"storyteller\"\n\nMore Information needed" ]
[ 6, 12 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"storyteller\"\n\nMore Information needed" ]
b6498f52801a419a1635195389a7d200a765c513
# Dataset Card for "book_cover_prompts_with_text" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Falah/book_cover_prompts_with_text
[ "region:us" ]
2023-10-08T07:00:54+00:00
{"dataset_info": {"features": [{"name": "prompts", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 297202, "num_examples": 1000}], "download_size": 30394, "dataset_size": 297202}}
2023-10-08T07:06:43+00:00
[]
[]
TAGS #region-us
# Dataset Card for "book_cover_prompts_with_text" More Information needed
[ "# Dataset Card for \"book_cover_prompts_with_text\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"book_cover_prompts_with_text\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"book_cover_prompts_with_text\"\n\nMore Information needed" ]
8f1e261eceda7ea659a12b906ef0b0ad5b35caca
# Dataset Card for "english_preference_ultra_feedback_unfiltered" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
DialogueCharacter/english_preference_ultra_feedback_unfiltered
[ "region:us" ]
2023-10-08T07:14:26+00:00
{"dataset_info": {"features": [{"name": "chosen", "dtype": "string"}, {"name": "rejected", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 315493419, "num_examples": 112568}], "download_size": 75641649, "dataset_size": 315493419}}
2023-10-08T07:14:32+00:00
[]
[]
TAGS #region-us
# Dataset Card for "english_preference_ultra_feedback_unfiltered" More Information needed
[ "# Dataset Card for \"english_preference_ultra_feedback_unfiltered\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"english_preference_ultra_feedback_unfiltered\"\n\nMore Information needed" ]
[ 6, 25 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"english_preference_ultra_feedback_unfiltered\"\n\nMore Information needed" ]
fd8473e77a088926a1627e753a334ff31da6b192
# Dataset Card for "book_cover_prompts_with_sections" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Falah/book_cover_prompts_with_sections
[ "region:us" ]
2023-10-08T07:17:39+00:00
{"dataset_info": {"features": [{"name": "prompts", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 393452, "num_examples": 1000}], "download_size": 45494, "dataset_size": 393452}}
2023-10-08T07:54:49+00:00
[]
[]
TAGS #region-us
# Dataset Card for "book_cover_prompts_with_sections" More Information needed
[ "# Dataset Card for \"book_cover_prompts_with_sections\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"book_cover_prompts_with_sections\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"book_cover_prompts_with_sections\"\n\nMore Information needed" ]
f6d10d9017034cb74d73ee3cb2a00e5a0e64e605
# Dataset Card for "ip-multi-ds" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
destitech/ip-multi-ds
[ "region:us" ]
2023-10-08T07:30:11+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "image", "dtype": "image"}, {"name": "conditioning_image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 21178236341.868, "num_examples": 12922}], "download_size": 14024940941, "dataset_size": 21178236341.868}}
2023-10-08T07:52:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ip-multi-ds" More Information needed
[ "# Dataset Card for \"ip-multi-ds\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ip-multi-ds\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ip-multi-ds\"\n\nMore Information needed" ]
a6981cf4057d27811a189cc869da102b208f5443
# Dataset Card for "COVID-QA-Chunk-64-sentence-transformer-biencoder-data-65_25_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
minh21/COVID-QA-Chunk-64-sentence-transformer-biencoder-data-65_25_10
[ "region:us" ]
2023-10-08T07:40:54+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "positive", "dtype": "string"}, {"name": "negative", "dtype": "string"}, {"name": "document_id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 6904342, "num_examples": 6435}, {"name": "test", "num_bytes": 822389, "num_examples": 710}], "download_size": 684517, "dataset_size": 7726731}}
2023-10-08T07:40:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "COVID-QA-Chunk-64-sentence-transformer-biencoder-data-65_25_10" More Information needed
[ "# Dataset Card for \"COVID-QA-Chunk-64-sentence-transformer-biencoder-data-65_25_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"COVID-QA-Chunk-64-sentence-transformer-biencoder-data-65_25_10\"\n\nMore Information needed" ]
[ 6, 37 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"COVID-QA-Chunk-64-sentence-transformer-biencoder-data-65_25_10\"\n\nMore Information needed" ]
d10de40bdd57b634c1d9243c816f8c1c4cf7e244
# Dataset Card for "COVID-QA-Chunk-64-testset-biencoder-data-65_25_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
minh21/COVID-QA-Chunk-64-testset-biencoder-data-65_25_10
[ "region:us" ]
2023-10-08T07:41:06+00:00
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "context_chunks", "sequence": "string"}, {"name": "document_id", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 13347211, "num_examples": 203}], "download_size": 437507, "dataset_size": 13347211}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-08T07:41:08+00:00
[]
[]
TAGS #region-us
# Dataset Card for "COVID-QA-Chunk-64-testset-biencoder-data-65_25_10" More Information needed
[ "# Dataset Card for \"COVID-QA-Chunk-64-testset-biencoder-data-65_25_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"COVID-QA-Chunk-64-testset-biencoder-data-65_25_10\"\n\nMore Information needed" ]
[ 6, 34 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"COVID-QA-Chunk-64-testset-biencoder-data-65_25_10\"\n\nMore Information needed" ]
b23170a786319e36be6bee9d1321cae9a594a473
# Dataset Card for "COVID-QA-Chunk-64-question-answering-biencoder-data-65_25_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
minh21/COVID-QA-Chunk-64-question-answering-biencoder-data-65_25_10
[ "region:us" ]
2023-10-08T07:41:20+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "context_chunks", "sequence": "string"}, {"name": "document_id", "dtype": "int64"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 48800727, "num_examples": 1176}, {"name": "validation", "num_bytes": 4517266, "num_examples": 134}], "download_size": 13294538, "dataset_size": 53317993}}
2023-10-08T07:41:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "COVID-QA-Chunk-64-question-answering-biencoder-data-65_25_10" More Information needed
[ "# Dataset Card for \"COVID-QA-Chunk-64-question-answering-biencoder-data-65_25_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"COVID-QA-Chunk-64-question-answering-biencoder-data-65_25_10\"\n\nMore Information needed" ]
[ 6, 37 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"COVID-QA-Chunk-64-question-answering-biencoder-data-65_25_10\"\n\nMore Information needed" ]
799601e73d0bb1b272adfb864d3e76f9bfa883b2
# Dataset Card for "programming_book_cover_prompts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Falah/programming_book_cover_prompts
[ "region:us" ]
2023-10-08T08:00:50+00:00
{"dataset_info": {"features": [{"name": "prompts", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 191332, "num_examples": 1000}], "download_size": 24579, "dataset_size": 191332}}
2023-10-08T08:00:51+00:00
[]
[]
TAGS #region-us
# Dataset Card for "programming_book_cover_prompts" More Information needed
[ "# Dataset Card for \"programming_book_cover_prompts\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"programming_book_cover_prompts\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"programming_book_cover_prompts\"\n\nMore Information needed" ]
e417f2f44cdd7e912517d9cb1f8662a55dd933be
# Dataset Card for "synpre_delete_1M" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/synpre_delete_1M
[ "region:us" ]
2023-10-08T08:08:19+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1742619734, "num_examples": 1000000}, {"name": "validation", "num_bytes": 17552085, "num_examples": 10000}], "download_size": 1091004286, "dataset_size": 1760171819}}
2023-10-08T08:12:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "synpre_delete_1M" More Information needed
[ "# Dataset Card for \"synpre_delete_1M\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"synpre_delete_1M\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"synpre_delete_1M\"\n\nMore Information needed" ]
ac82ab0f33778fe8e9c30f26ddcf0165bbabc3b7
# Dataset Card for "synpre_union_1M" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/synpre_union_1M
[ "region:us" ]
2023-10-08T08:16:39+00:00
{"dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1167868421, "num_examples": 1000000}, {"name": "validation", "num_bytes": 11660114, "num_examples": 10000}], "download_size": 788391948, "dataset_size": 1179528535}}
2023-10-08T08:18:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for "synpre_union_1M" More Information needed
[ "# Dataset Card for \"synpre_union_1M\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"synpre_union_1M\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"synpre_union_1M\"\n\nMore Information needed" ]
58c44a10089b7a0cc2f4441db02e6629a69cfce2
# Dataset Card for Dataset Name This dataset contains manually generated instructions to modify an infographic. ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
McSpicyWithMilo/infographic-instructions
[ "language:en", "region:us" ]
2023-10-08T08:21:48+00:00
{"language": ["en"]}
2024-02-06T08:07:11+00:00
[]
[ "en" ]
TAGS #language-English #region-us
# Dataset Card for Dataset Name This dataset contains manually generated instructions to modify an infographic. ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Dataset Name\n\nThis dataset contains manually generated instructions to modify an infographic.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#language-English #region-us \n", "# Dataset Card for Dataset Name\n\nThis dataset contains manually generated instructions to modify an infographic.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 10, 25, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#language-English #region-us \n# Dataset Card for Dataset Name\n\nThis dataset contains manually generated instructions to modify an infographic.## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
2ace10ede6e78318a55f2851325aea9ad4565ce3
# Dataset Card for "dolly-ko" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jjonhwa/dolly-ko
[ "region:us" ]
2023-10-08T08:55:15+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 14238792, "num_examples": 15011}], "download_size": 8006189, "dataset_size": 14238792}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-08T08:55:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dolly-ko" More Information needed
[ "# Dataset Card for \"dolly-ko\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dolly-ko\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"dolly-ko\"\n\nMore Information needed" ]
3679623cf7a2a513725a5bcf5bf0e16d31be5040
# Models ## GPTs: [• Pigeon-TextGen](https://huggingface.co/openskyml/pigeon-textgen) [• GPT-2](https://huggingface.co/gpt2) ## Chats: [• Falcon-180B-chat](https://huggingface.co/tiiuae/falcon-180B-chat) [• LLaMA-13B-Chat-GGUF](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGUF) ## Diffusions: [• SD-1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5) [• Dall●E-mini](https://huggingface.co/dalle-mini/dalle-mini)
openskyml/models
[ "language:en", "code", "region:us" ]
2023-10-08T09:22:16+00:00
{"language": ["en"], "tags": ["code"]}
2023-10-08T09:32:59+00:00
[]
[ "en" ]
TAGS #language-English #code #region-us
# Models ## GPTs: • Pigeon-TextGen • GPT-2 ## Chats: • Falcon-180B-chat • LLaMA-13B-Chat-GGUF ## Diffusions: • SD-1.5 • Dall●E-mini
[ "# Models", "## GPTs:\n\n• Pigeon-TextGen\n• GPT-2", "## Chats:\n\n• Falcon-180B-chat\n• LLaMA-13B-Chat-GGUF", "## Diffusions:\n\n• SD-1.5\n• Dall●E-mini" ]
[ "TAGS\n#language-English #code #region-us \n", "# Models", "## GPTs:\n\n• Pigeon-TextGen\n• GPT-2", "## Chats:\n\n• Falcon-180B-chat\n• LLaMA-13B-Chat-GGUF", "## Diffusions:\n\n• SD-1.5\n• Dall●E-mini" ]
[ 12, 3, 16, 22, 16 ]
[ "passage: TAGS\n#language-English #code #region-us \n# Models## GPTs:\n\n• Pigeon-TextGen\n• GPT-2## Chats:\n\n• Falcon-180B-chat\n• LLaMA-13B-Chat-GGUF## Diffusions:\n\n• SD-1.5\n• Dall●E-mini" ]
36417bf2aee73437e8f9bd5e75ce7720bf86bec9
# Dataset Card for Wikipedia ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [https://dumps.wikimedia.org](https://dumps.wikimedia.org) - **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Paper:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Dataset Summary Wikipedia dataset containing cleaned articles of all languages. The datasets are built from the Wikipedia dump (https://dumps.wikimedia.org/) with one split per language. Each example contains the content of one full Wikipedia article with cleaning to strip markdown and unwanted sections (references, etc.). The articles are parsed using the ``mwparserfromhell`` tool. To load this dataset you need to install Apache Beam and ``mwparserfromhell`` first: ``` pip install apache_beam mwparserfromhell ``` Then, you can load any subset of Wikipedia per language and per date this way: ```python from datasets import load_dataset load_dataset("wikipedia", language="sw", date="20220120", beam_runner=...) ``` where you can pass as `beam_runner` any Apache Beam supported runner for (distributed) data processing (see [here](https://beam.apache.org/documentation/runners/capability-matrix/)). Pass "DirectRunner" to run it on your machine. You can find the full list of languages and dates [here](https://dumps.wikimedia.org/backup-index.html). Some subsets of Wikipedia have already been processed by HuggingFace, and you can load them just with: ```python from datasets import load_dataset load_dataset("wikipedia", "20220301.en") ``` The list of pre-processed subsets is: - "20220301.de" - "20220301.en" - "20220301.fr" - "20220301.frr" - "20220301.it" - "20220301.simple" ### Supported Tasks and Leaderboards The dataset is generally used for Language Modeling. ### Languages You can find the list of languages [here](https://meta.wikimedia.org/wiki/List_of_Wikipedias). ## Dataset Structure ### Data Instances An example looks as follows: ``` {'id': '1', 'url': 'https://simple.wikipedia.org/wiki/April', 'title': 'April', 'text': 'April is the fourth month...' } ``` Some subsets of Wikipedia have already been processed by HuggingFace, as you can see below: #### 20220301.de - **Size of downloaded dataset files:** 6.84 GB - **Size of the generated dataset:** 9.34 GB - **Total amount of disk used:** 16.18 GB #### 20220301.en - **Size of downloaded dataset files:** 21.60 GB - **Size of the generated dataset:** 21.26 GB - **Total amount of disk used:** 42.86 GB #### 20220301.fr - **Size of downloaded dataset files:** 5.87 GB - **Size of the generated dataset:** 7.73 GB - **Total amount of disk used:** 13.61 GB #### 20220301.frr - **Size of downloaded dataset files:** 13.04 MB - **Size of the generated dataset:** 9.57 MB - **Total amount of disk used:** 22.62 MB #### 20220301.it - **Size of downloaded dataset files:** 3.69 GB - **Size of the generated dataset:** 4.76 GB - **Total amount of disk used:** 8.45 GB #### 20220301.simple - **Size of downloaded dataset files:** 251.32 MB - **Size of the generated dataset:** 246.49 MB - **Total amount of disk used:** 497.82 MB ### Data Fields The data fields are the same among all configurations: - `id` (`str`): ID of the article. - `url` (`str`): URL of the article. - `title` (`str`): Title of the article. - `text` (`str`): Text content of the article. ### Data Splits Here are the number of examples for several configurations: | name | train | |-----------------|--------:| | 20220301.de | 2665357 | | 20220301.en | 6458670 | | 20220301.fr | 2402095 | | 20220301.frr | 15199 | | 20220301.it | 1743035 | | 20220301.simple | 205328 | ## Dataset Creation ### Curation Rationale [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Source Data #### Initial Data Collection and Normalization [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### Who are the source language producers? [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Annotations #### Annotation process [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### Who are the annotators? [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Personal and Sensitive Information [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Discussion of Biases [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Other Known Limitations [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ## Additional Information ### Dataset Curators [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Licensing Information Most of Wikipedia's text and many of its images are co-licensed under the [Creative Commons Attribution-ShareAlike 3.0 Unported License](https://en.wikipedia.org/wiki/Wikipedia:Text_of_Creative_Commons_Attribution-ShareAlike_3.0_Unported_License) (CC BY-SA) and the [GNU Free Documentation License](https://en.wikipedia.org/wiki/Wikipedia:Text_of_the_GNU_Free_Documentation_License) (GFDL) (unversioned, with no invariant sections, front-cover texts, or back-cover texts). Some text has been imported only under CC BY-SA and CC BY-SA-compatible license and cannot be reused under GFDL; such text will be identified on the page footer, in the page history, or on the discussion page of the article that utilizes the text. ### Citation Information ``` @ONLINE{wikidump, author = "Wikimedia Foundation", title = "Wikimedia Downloads", url = "https://dumps.wikimedia.org" } ``` ### Contributions Thanks to [@lewtun](https://github.com/lewtun), [@mariamabarham](https://github.com/mariamabarham), [@thomwolf](https://github.com/thomwolf), [@lhoestq](https://github.com/lhoestq), [@patrickvonplaten](https://github.com/patrickvonplaten) for adding this dataset.
openskyml/wikipedia
[ "task_categories:text-generation", "task_categories:fill-mask", "task_ids:language-modeling", "task_ids:masked-language-modeling", "annotations_creators:no-annotation", "language_creators:crowdsourced", "multilinguality:multilingual", "size_categories:n<1K", "size_categories:1K<n<10K", "size_categories:10K<n<100K", "size_categories:100K<n<1M", "size_categories:1M<n<10M", "source_datasets:original", "language:aa", "language:ab", "language:ace", "language:af", "language:ak", "language:als", "language:am", "language:an", "language:ang", "language:ar", "language:arc", "language:arz", "language:as", "language:ast", "language:atj", "language:av", "language:ay", "language:az", "language:azb", "language:ba", "language:bar", "language:bcl", "language:be", "language:bg", "language:bh", "language:bi", "language:bjn", "language:bm", "language:bn", "language:bo", "language:bpy", "language:br", "language:bs", "language:bug", "language:bxr", "language:ca", "language:cbk", "language:cdo", "language:ce", "language:ceb", "language:ch", "language:cho", "language:chr", "language:chy", "language:ckb", "language:co", "language:cr", "language:crh", "language:cs", "language:csb", "language:cu", "language:cv", "language:cy", "language:da", "language:de", "language:din", "language:diq", "language:dsb", "language:dty", "language:dv", "language:dz", "language:ee", "language:el", "language:eml", "language:en", "language:eo", "language:es", "language:et", "language:eu", "language:ext", "language:fa", "language:ff", "language:fi", "language:fj", "language:fo", "language:fr", "language:frp", "language:frr", "language:fur", "language:fy", "language:ga", "language:gag", "language:gan", "language:gd", "language:gl", "language:glk", "language:gn", "language:gom", "language:gor", "language:got", "language:gu", "language:gv", "language:ha", "language:hak", "language:haw", "language:he", "language:hi", "language:hif", "language:ho", "language:hr", "language:hsb", "language:ht", "language:hu", "language:hy", "language:ia", "language:id", "language:ie", "language:ig", "language:ii", "language:ik", "language:ilo", "language:inh", "language:io", "language:is", "language:it", "language:iu", "language:ja", "language:jam", "language:jbo", "language:jv", "language:ka", "language:kaa", "language:kab", "language:kbd", "language:kbp", "language:kg", "language:ki", "language:kj", "language:kk", "language:kl", "language:km", "language:kn", "language:ko", "language:koi", "language:krc", "language:ks", "language:ksh", "language:ku", "language:kv", "language:kw", "language:ky", "language:la", "language:lad", "language:lb", "language:lbe", "language:lez", "language:lfn", "language:lg", "language:li", "language:lij", "language:lmo", "language:ln", "language:lo", "language:lrc", "language:lt", "language:ltg", "language:lv", "language:lzh", "language:mai", "language:mdf", "language:mg", "language:mh", "language:mhr", "language:mi", "language:min", "language:mk", "language:ml", "language:mn", "language:mr", "language:mrj", "language:ms", "language:mt", "language:mus", "language:mwl", "language:my", "language:myv", "language:mzn", "language:na", "language:nah", "language:nan", "language:nap", "language:nds", "language:ne", "language:new", "language:ng", "language:nl", "language:nn", "language:no", "language:nov", "language:nrf", "language:nso", "language:nv", "language:ny", "language:oc", "language:olo", "language:om", "language:or", "language:os", "language:pa", "language:pag", "language:pam", "language:pap", "language:pcd", "language:pdc", "language:pfl", "language:pi", "language:pih", "language:pl", "language:pms", "language:pnb", "language:pnt", "language:ps", "language:pt", "language:qu", "language:rm", "language:rmy", "language:rn", "language:ro", "language:ru", "language:rue", "language:rup", "language:rw", "language:sa", "language:sah", "language:sat", "language:sc", "language:scn", "language:sco", "language:sd", "language:se", "language:sg", "language:sgs", "language:sh", "language:si", "language:sk", "language:sl", "language:sm", "language:sn", "language:so", "language:sq", "language:sr", "language:srn", "language:ss", "language:st", "language:stq", "language:su", "language:sv", "language:sw", "language:szl", "language:ta", "language:tcy", "language:tdt", "language:te", "language:tg", "language:th", "language:ti", "language:tk", "language:tl", "language:tn", "language:to", "language:tpi", "language:tr", "language:ts", "language:tt", "language:tum", "language:tw", "language:ty", "language:tyv", "language:udm", "language:ug", "language:uk", "language:ur", "language:uz", "language:ve", "language:vec", "language:vep", "language:vi", "language:vls", "language:vo", "language:vro", "language:wa", "language:war", "language:wo", "language:wuu", "language:xal", "language:xh", "language:xmf", "language:yi", "language:yo", "language:yue", "language:za", "language:zea", "language:zh", "language:zu", "license:cc-by-sa-3.0", "license:gfdl", "region:us" ]
2023-10-08T09:34:49+00:00
{"annotations_creators": ["no-annotation"], "language_creators": ["crowdsourced"], "language": ["aa", "ab", "ace", "af", "ak", "als", "am", "an", "ang", "ar", "arc", "arz", "as", "ast", "atj", "av", "ay", "az", "azb", "ba", "bar", "bcl", "be", "bg", "bh", "bi", "bjn", "bm", "bn", "bo", "bpy", "br", "bs", "bug", "bxr", "ca", "cbk", "cdo", "ce", "ceb", "ch", "cho", "chr", "chy", "ckb", "co", "cr", "crh", "cs", "csb", "cu", "cv", "cy", "da", "de", "din", "diq", "dsb", "dty", "dv", "dz", "ee", "el", "eml", "en", "eo", "es", "et", "eu", "ext", "fa", "ff", "fi", "fj", "fo", "fr", "frp", "frr", "fur", "fy", "ga", "gag", "gan", "gd", "gl", "glk", "gn", "gom", "gor", "got", "gu", "gv", "ha", "hak", "haw", "he", "hi", "hif", "ho", "hr", "hsb", "ht", "hu", "hy", "ia", "id", "ie", "ig", "ii", "ik", "ilo", "inh", "io", "is", "it", "iu", "ja", "jam", "jbo", "jv", "ka", "kaa", "kab", "kbd", "kbp", "kg", "ki", "kj", "kk", "kl", "km", "kn", "ko", "koi", "krc", "ks", "ksh", "ku", "kv", "kw", "ky", "la", "lad", "lb", "lbe", "lez", "lfn", "lg", "li", "lij", "lmo", "ln", "lo", "lrc", "lt", "ltg", "lv", "lzh", "mai", "mdf", "mg", "mh", "mhr", "mi", "min", "mk", "ml", "mn", "mr", "mrj", "ms", "mt", "mus", "mwl", "my", "myv", "mzn", "na", "nah", "nan", "nap", "nds", "ne", "new", "ng", "nl", "nn", "no", "nov", "nrf", "nso", "nv", "ny", "oc", "olo", "om", "or", "os", "pa", "pag", "pam", "pap", "pcd", "pdc", "pfl", "pi", "pih", "pl", "pms", "pnb", "pnt", "ps", "pt", "qu", "rm", "rmy", "rn", "ro", "ru", "rue", "rup", "rw", "sa", "sah", "sat", "sc", "scn", "sco", "sd", "se", "sg", "sgs", "sh", "si", "sk", "sl", "sm", "sn", "so", "sq", "sr", "srn", "ss", "st", "stq", "su", "sv", "sw", "szl", "ta", "tcy", "tdt", "te", "tg", "th", "ti", "tk", "tl", "tn", "to", "tpi", "tr", "ts", "tt", "tum", "tw", "ty", "tyv", "udm", "ug", "uk", "ur", "uz", "ve", "vec", "vep", "vi", "vls", "vo", "vro", "wa", "war", "wo", "wuu", "xal", "xh", "xmf", "yi", "yo", "yue", "za", "zea", "zh", "zu"], "license": ["cc-by-sa-3.0", "gfdl"], "multilinguality": ["multilingual"], "size_categories": ["n<1K", "1K<n<10K", "10K<n<100K", "100K<n<1M", "1M<n<10M"], "source_datasets": ["original"], "task_categories": ["text-generation", "fill-mask"], "task_ids": ["language-modeling", "masked-language-modeling"], "pretty_name": "Wikipedia", "config_names": ["20220301.aa", "20220301.ab", "20220301.ace", "20220301.ady", "20220301.af", "20220301.ak", "20220301.als", "20220301.am", "20220301.an", "20220301.ang", "20220301.ar", "20220301.arc", "20220301.arz", "20220301.as", "20220301.ast", "20220301.atj", "20220301.av", "20220301.ay", "20220301.az", "20220301.azb", "20220301.ba", "20220301.bar", "20220301.bat-smg", "20220301.bcl", "20220301.be", "20220301.be-x-old", "20220301.bg", "20220301.bh", "20220301.bi", "20220301.bjn", "20220301.bm", "20220301.bn", "20220301.bo", "20220301.bpy", "20220301.br", "20220301.bs", "20220301.bug", "20220301.bxr", "20220301.ca", "20220301.cbk-zam", "20220301.cdo", "20220301.ce", "20220301.ceb", "20220301.ch", "20220301.cho", "20220301.chr", "20220301.chy", "20220301.ckb", "20220301.co", "20220301.cr", "20220301.crh", "20220301.cs", "20220301.csb", "20220301.cu", "20220301.cv", "20220301.cy", "20220301.da", "20220301.de", "20220301.din", "20220301.diq", "20220301.dsb", "20220301.dty", "20220301.dv", "20220301.dz", "20220301.ee", "20220301.el", "20220301.eml", "20220301.en", "20220301.eo", "20220301.es", "20220301.et", "20220301.eu", "20220301.ext", "20220301.fa", "20220301.ff", "20220301.fi", "20220301.fiu-vro", "20220301.fj", "20220301.fo", "20220301.fr", "20220301.frp", "20220301.frr", "20220301.fur", "20220301.fy", "20220301.ga", "20220301.gag", "20220301.gan", "20220301.gd", "20220301.gl", "20220301.glk", "20220301.gn", "20220301.gom", "20220301.gor", "20220301.got", "20220301.gu", "20220301.gv", "20220301.ha", "20220301.hak", "20220301.haw", "20220301.he", "20220301.hi", "20220301.hif", "20220301.ho", "20220301.hr", "20220301.hsb", "20220301.ht", "20220301.hu", "20220301.hy", "20220301.ia", "20220301.id", "20220301.ie", "20220301.ig", "20220301.ii", "20220301.ik", "20220301.ilo", "20220301.inh", "20220301.io", "20220301.is", "20220301.it", "20220301.iu", "20220301.ja", "20220301.jam", "20220301.jbo", "20220301.jv", "20220301.ka", "20220301.kaa", "20220301.kab", "20220301.kbd", "20220301.kbp", "20220301.kg", "20220301.ki", "20220301.kj", "20220301.kk", "20220301.kl", "20220301.km", "20220301.kn", "20220301.ko", "20220301.koi", "20220301.krc", "20220301.ks", "20220301.ksh", "20220301.ku", "20220301.kv", "20220301.kw", "20220301.ky", "20220301.la", "20220301.lad", "20220301.lb", "20220301.lbe", "20220301.lez", "20220301.lfn", "20220301.lg", "20220301.li", "20220301.lij", "20220301.lmo", "20220301.ln", "20220301.lo", "20220301.lrc", "20220301.lt", "20220301.ltg", "20220301.lv", "20220301.mai", "20220301.map-bms", "20220301.mdf", "20220301.mg", "20220301.mh", "20220301.mhr", "20220301.mi", "20220301.min", "20220301.mk", "20220301.ml", "20220301.mn", "20220301.mr", "20220301.mrj", "20220301.ms", "20220301.mt", "20220301.mus", "20220301.mwl", "20220301.my", "20220301.myv", "20220301.mzn", "20220301.na", "20220301.nah", "20220301.nap", "20220301.nds", "20220301.nds-nl", "20220301.ne", "20220301.new", "20220301.ng", "20220301.nl", "20220301.nn", "20220301.no", "20220301.nov", "20220301.nrm", "20220301.nso", "20220301.nv", "20220301.ny", "20220301.oc", "20220301.olo", "20220301.om", "20220301.or", "20220301.os", "20220301.pa", "20220301.pag", "20220301.pam", "20220301.pap", "20220301.pcd", "20220301.pdc", "20220301.pfl", "20220301.pi", "20220301.pih", "20220301.pl", "20220301.pms", "20220301.pnb", "20220301.pnt", "20220301.ps", "20220301.pt", "20220301.qu", "20220301.rm", "20220301.rmy", "20220301.rn", "20220301.ro", "20220301.roa-rup", "20220301.roa-tara", "20220301.ru", "20220301.rue", "20220301.rw", "20220301.sa", "20220301.sah", "20220301.sat", "20220301.sc", "20220301.scn", "20220301.sco", "20220301.sd", "20220301.se", "20220301.sg", "20220301.sh", "20220301.si", "20220301.simple", "20220301.sk", "20220301.sl", "20220301.sm", "20220301.sn", "20220301.so", "20220301.sq", "20220301.sr", "20220301.srn", "20220301.ss", "20220301.st", "20220301.stq", "20220301.su", "20220301.sv", "20220301.sw", "20220301.szl", "20220301.ta", "20220301.tcy", "20220301.te", "20220301.tet", "20220301.tg", "20220301.th", "20220301.ti", "20220301.tk", "20220301.tl", "20220301.tn", "20220301.to", "20220301.tpi", "20220301.tr", "20220301.ts", "20220301.tt", "20220301.tum", "20220301.tw", "20220301.ty", "20220301.tyv", "20220301.udm", "20220301.ug", "20220301.uk", "20220301.ur", "20220301.uz", "20220301.ve", "20220301.vec", "20220301.vep", "20220301.vi", "20220301.vls", "20220301.vo", "20220301.wa", "20220301.war", "20220301.wo", "20220301.wuu", "20220301.xal", "20220301.xh", "20220301.xmf", "20220301.yi", "20220301.yo", "20220301.za", "20220301.zea", "20220301.zh", "20220301.zh-classical", "20220301.zh-min-nan", "20220301.zh-yue", "20220301.zu"], "language_bcp47": ["nds-nl"], "dataset_info": [{"config_name": "20220301.de", "features": [{"name": "id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 8905282792, "num_examples": 2665357}], "download_size": 6523215105, "dataset_size": 8905282792}, {"config_name": "20220301.en", "features": [{"name": "id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 20275516160, "num_examples": 6458670}], "download_size": 20598313936, "dataset_size": 20275516160}, {"config_name": "20220301.fr", "features": [{"name": "id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 7375920768, "num_examples": 2402095}], "download_size": 5602565274, "dataset_size": 7375920768}, {"config_name": "20220301.frr", "features": [{"name": "id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9129760, "num_examples": 15199}], "download_size": 12438017, "dataset_size": 9129760}, {"config_name": "20220301.it", "features": [{"name": "id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4539944448, "num_examples": 1743035}], "download_size": 3516441239, "dataset_size": 4539944448}, {"config_name": "20220301.simple", "features": [{"name": "id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 235072360, "num_examples": 205328}], "download_size": 239682796, "dataset_size": 235072360}]}
2023-10-08T09:37:06+00:00
[]
[ "aa", "ab", "ace", "af", "ak", "als", "am", "an", "ang", "ar", "arc", "arz", "as", "ast", "atj", "av", "ay", "az", "azb", "ba", "bar", "bcl", "be", "bg", "bh", "bi", "bjn", "bm", "bn", "bo", "bpy", "br", "bs", "bug", "bxr", "ca", "cbk", "cdo", "ce", "ceb", "ch", "cho", "chr", "chy", "ckb", "co", "cr", "crh", "cs", "csb", "cu", "cv", "cy", "da", "de", "din", "diq", "dsb", "dty", "dv", "dz", "ee", "el", "eml", "en", "eo", "es", "et", "eu", "ext", "fa", "ff", "fi", "fj", "fo", "fr", "frp", "frr", "fur", "fy", "ga", "gag", "gan", "gd", "gl", "glk", "gn", "gom", "gor", "got", "gu", "gv", "ha", "hak", "haw", "he", "hi", "hif", "ho", "hr", "hsb", "ht", "hu", "hy", "ia", "id", "ie", "ig", "ii", "ik", "ilo", "inh", "io", "is", "it", "iu", "ja", "jam", "jbo", "jv", "ka", "kaa", "kab", "kbd", "kbp", "kg", "ki", "kj", "kk", "kl", "km", "kn", "ko", "koi", "krc", "ks", "ksh", "ku", "kv", "kw", "ky", "la", "lad", "lb", "lbe", "lez", "lfn", "lg", "li", "lij", "lmo", "ln", "lo", "lrc", "lt", "ltg", "lv", "lzh", "mai", "mdf", "mg", "mh", "mhr", "mi", "min", "mk", "ml", "mn", "mr", "mrj", "ms", "mt", "mus", "mwl", "my", "myv", "mzn", "na", "nah", "nan", "nap", "nds", "ne", "new", "ng", "nl", "nn", "no", "nov", "nrf", "nso", "nv", "ny", "oc", "olo", "om", "or", "os", "pa", "pag", "pam", "pap", "pcd", "pdc", "pfl", "pi", "pih", "pl", "pms", "pnb", "pnt", "ps", "pt", "qu", "rm", "rmy", "rn", "ro", "ru", "rue", "rup", "rw", "sa", "sah", "sat", "sc", "scn", "sco", "sd", "se", "sg", "sgs", "sh", "si", "sk", "sl", "sm", "sn", "so", "sq", "sr", "srn", "ss", "st", "stq", "su", "sv", "sw", "szl", "ta", "tcy", "tdt", "te", "tg", "th", "ti", "tk", "tl", "tn", "to", "tpi", "tr", "ts", "tt", "tum", "tw", "ty", "tyv", "udm", "ug", "uk", "ur", "uz", "ve", "vec", "vep", "vi", "vls", "vo", "vro", "wa", "war", "wo", "wuu", "xal", "xh", "xmf", "yi", "yo", "yue", "za", "zea", "zh", "zu" ]
TAGS #task_categories-text-generation #task_categories-fill-mask #task_ids-language-modeling #task_ids-masked-language-modeling #annotations_creators-no-annotation #language_creators-crowdsourced #multilinguality-multilingual #size_categories-n<1K #size_categories-1K<n<10K #size_categories-10K<n<100K #size_categories-100K<n<1M #size_categories-1M<n<10M #source_datasets-original #language-Afar #language-Abkhazian #language-Achinese #language-Afrikaans #language-Akan #language-Tosk Albanian #language-Amharic #language-Aragonese #language-Old English (ca. 450-1100) #language-Arabic #language-Official Aramaic (700-300 BCE) #language-Egyptian Arabic #language-Assamese #language-Asturian #language-Atikamekw #language-Avaric #language-Aymara #language-Azerbaijani #language-South Azerbaijani #language-Bashkir #language-Bavarian #language-Central Bikol #language-Belarusian #language-Bulgarian #language-bh #language-Bislama #language-Banjar #language-Bambara #language-Bengali #language-Tibetan #language-Bishnupriya #language-Breton #language-Bosnian #language-Buginese #language-Russia Buriat #language-Catalan #language-Chavacano #language-Min Dong Chinese #language-Chechen #language-Cebuano #language-Chamorro #language-Choctaw #language-Cherokee #language-Cheyenne #language-Central Kurdish #language-Corsican #language-Cree #language-Crimean Tatar #language-Czech #language-Kashubian #language-Church Slavic #language-Chuvash #language-Welsh #language-Danish #language-German #language-Dinka #language-Dimli (individual language) #language-Lower Sorbian #language-Dotyali #language-Dhivehi #language-Dzongkha #language-Ewe #language-Modern Greek (1453-) #language-Emiliano-Romagnolo #language-English #language-Esperanto #language-Spanish #language-Estonian #language-Basque #language-Extremaduran #language-Persian #language-Fulah #language-Finnish #language-Fijian #language-Faroese #language-French #language-Arpitan #language-Northern Frisian #language-Friulian #language-Western Frisian #language-Irish #language-Gagauz #language-Gan Chinese #language-Scottish Gaelic #language-Galician #language-Gilaki #language-Guarani #language-Goan Konkani #language-Gorontalo #language-Gothic #language-Gujarati #language-Manx #language-Hausa #language-Hakka Chinese #language-Hawaiian #language-Hebrew #language-Hindi #language-Fiji Hindi #language-Hiri Motu #language-Croatian #language-Upper Sorbian #language-Haitian #language-Hungarian #language-Armenian #language-Interlingua (International Auxiliary Language Association) #language-Indonesian #language-Interlingue #language-Igbo #language-Sichuan Yi #language-Inupiaq #language-Iloko #language-Ingush #language-Ido #language-Icelandic #language-Italian #language-Inuktitut #language-Japanese #language-Jamaican Creole English #language-Lojban #language-Javanese #language-Georgian #language-Kara-Kalpak #language-Kabyle #language-Kabardian #language-Kabiyè #language-Kongo #language-Kikuyu #language-Kuanyama #language-Kazakh #language-Kalaallisut #language-Khmer #language-Kannada #language-Korean #language-Komi-Permyak #language-Karachay-Balkar #language-Kashmiri #language-Kölsch #language-Kurdish #language-Komi #language-Cornish #language-Kirghiz #language-Latin #language-Ladino #language-Luxembourgish #language-Lak #language-Lezghian #language-Lingua Franca Nova #language-Ganda #language-Limburgan #language-Ligurian #language-Lombard #language-Lingala #language-Lao #language-Northern Luri #language-Lithuanian #language-Latgalian #language-Latvian #language-Literary Chinese #language-Maithili #language-Moksha #language-Malagasy #language-Marshallese #language-Eastern Mari #language-Maori #language-Minangkabau #language-Macedonian #language-Malayalam #language-Mongolian #language-Marathi #language-Western Mari #language-Malay (macrolanguage) #language-Maltese #language-Creek #language-Mirandese #language-Burmese #language-Erzya #language-Mazanderani #language-Nauru #language-nah #language-Min Nan Chinese #language-Neapolitan #language-Low German #language-Nepali (macrolanguage) #language-Newari #language-Ndonga #language-Dutch #language-Norwegian Nynorsk #language-Norwegian #language-Novial #language-Jèrriais #language-Pedi #language-Navajo #language-Nyanja #language-Occitan (post 1500) #language-Livvi #language-Oromo #language-Oriya (macrolanguage) #language-Ossetian #language-Panjabi #language-Pangasinan #language-Pampanga #language-Papiamento #language-Picard #language-Pennsylvania German #language-Pfaelzisch #language-Pali #language-Pitcairn-Norfolk #language-Polish #language-Piemontese #language-Western Panjabi #language-Pontic #language-Pushto #language-Portuguese #language-Quechua #language-Romansh #language-Vlax Romani #language-Rundi #language-Romanian #language-Russian #language-Rusyn #language-Macedo-Romanian #language-Kinyarwanda #language-Sanskrit #language-Yakut #language-Santali #language-Sardinian #language-Sicilian #language-Scots #language-Sindhi #language-Northern Sami #language-Sango #language-Samogitian #language-Serbo-Croatian #language-Sinhala #language-Slovak #language-Slovenian #language-Samoan #language-Shona #language-Somali #language-Albanian #language-Serbian #language-Sranan Tongo #language-Swati #language-Southern Sotho #language-Saterfriesisch #language-Sundanese #language-Swedish #language-Swahili (macrolanguage) #language-Silesian #language-Tamil #language-Tulu #language-Tetun Dili #language-Telugu #language-Tajik #language-Thai #language-Tigrinya #language-Turkmen #language-Tagalog #language-Tswana #language-Tonga (Tonga Islands) #language-Tok Pisin #language-Turkish #language-Tsonga #language-Tatar #language-Tumbuka #language-Twi #language-Tahitian #language-Tuvinian #language-Udmurt #language-Uighur #language-Ukrainian #language-Urdu #language-Uzbek #language-Venda #language-Venetian #language-Veps #language-Vietnamese #language-Vlaams #language-Volapük #language-Võro #language-Walloon #language-Waray (Philippines) #language-Wolof #language-Wu Chinese #language-Kalmyk #language-Xhosa #language-Mingrelian #language-Yiddish #language-Yoruba #language-Yue Chinese #language-Zhuang #language-Zeeuws #language-Chinese #language-Zulu #license-cc-by-sa-3.0 #license-gfdl #region-us
Dataset Card for Wikipedia ========================== Table of Contents ----------------- * Dataset Description + Dataset Summary + Supported Tasks and Leaderboards + Languages * Dataset Structure + Data Instances + Data Fields + Data Splits * Dataset Creation + Curation Rationale + Source Data + Annotations + Personal and Sensitive Information * Considerations for Using the Data + Social Impact of Dataset + Discussion of Biases + Other Known Limitations * Additional Information + Dataset Curators + Licensing Information + Citation Information + Contributions Dataset Description ------------------- * Homepage: URL * Repository: * Paper: * Point of Contact: ### Dataset Summary Wikipedia dataset containing cleaned articles of all languages. The datasets are built from the Wikipedia dump (URL with one split per language. Each example contains the content of one full Wikipedia article with cleaning to strip markdown and unwanted sections (references, etc.). The articles are parsed using the ''mwparserfromhell'' tool. To load this dataset you need to install Apache Beam and ''mwparserfromhell'' first: Then, you can load any subset of Wikipedia per language and per date this way: where you can pass as 'beam\_runner' any Apache Beam supported runner for (distributed) data processing (see here). Pass "DirectRunner" to run it on your machine. You can find the full list of languages and dates here. Some subsets of Wikipedia have already been processed by HuggingFace, and you can load them just with: The list of pre-processed subsets is: * "URL" * "URL" * "URL" * "URL" * "URL" * "URL" ### Supported Tasks and Leaderboards The dataset is generally used for Language Modeling. ### Languages You can find the list of languages here. Dataset Structure ----------------- ### Data Instances An example looks as follows: Some subsets of Wikipedia have already been processed by HuggingFace, as you can see below: #### URL * Size of downloaded dataset files: 6.84 GB * Size of the generated dataset: 9.34 GB * Total amount of disk used: 16.18 GB #### URL * Size of downloaded dataset files: 21.60 GB * Size of the generated dataset: 21.26 GB * Total amount of disk used: 42.86 GB #### URL * Size of downloaded dataset files: 5.87 GB * Size of the generated dataset: 7.73 GB * Total amount of disk used: 13.61 GB #### URL * Size of downloaded dataset files: 13.04 MB * Size of the generated dataset: 9.57 MB * Total amount of disk used: 22.62 MB #### URL * Size of downloaded dataset files: 3.69 GB * Size of the generated dataset: 4.76 GB * Total amount of disk used: 8.45 GB #### URL * Size of downloaded dataset files: 251.32 MB * Size of the generated dataset: 246.49 MB * Total amount of disk used: 497.82 MB ### Data Fields The data fields are the same among all configurations: * 'id' ('str'): ID of the article. * 'url' ('str'): URL of the article. * 'title' ('str'): Title of the article. * 'text' ('str'): Text content of the article. ### Data Splits Here are the number of examples for several configurations: Dataset Creation ---------------- ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information Considerations for Using the Data --------------------------------- ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations Additional Information ---------------------- ### Dataset Curators ### Licensing Information Most of Wikipedia's text and many of its images are co-licensed under the Creative Commons Attribution-ShareAlike 3.0 Unported License (CC BY-SA) and the GNU Free Documentation License (GFDL) (unversioned, with no invariant sections, front-cover texts, or back-cover texts). Some text has been imported only under CC BY-SA and CC BY-SA-compatible license and cannot be reused under GFDL; such text will be identified on the page footer, in the page history, or on the discussion page of the article that utilizes the text. ### Contributions Thanks to @lewtun, @mariamabarham, @thomwolf, @lhoestq, @patrickvonplaten for adding this dataset.
[ "### Dataset Summary\n\n\nWikipedia dataset containing cleaned articles of all languages.\nThe datasets are built from the Wikipedia dump\n(URL with one split per language. Each example\ncontains the content of one full Wikipedia article with cleaning to strip\nmarkdown and unwanted sections (references, etc.).\n\n\nThe articles are parsed using the ''mwparserfromhell'' tool.\n\n\nTo load this dataset you need to install Apache Beam and ''mwparserfromhell'' first:\n\n\nThen, you can load any subset of Wikipedia per language and per date this way:\n\n\nwhere you can pass as 'beam\\_runner' any Apache Beam supported runner for (distributed) data processing\n(see here).\nPass \"DirectRunner\" to run it on your machine.\n\n\nYou can find the full list of languages and dates here.\n\n\nSome subsets of Wikipedia have already been processed by HuggingFace, and you can load them just with:\n\n\nThe list of pre-processed subsets is:\n\n\n* \"URL\"\n* \"URL\"\n* \"URL\"\n* \"URL\"\n* \"URL\"\n* \"URL\"", "### Supported Tasks and Leaderboards\n\n\nThe dataset is generally used for Language Modeling.", "### Languages\n\n\nYou can find the list of languages here.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nAn example looks as follows:\n\n\nSome subsets of Wikipedia have already been processed by HuggingFace, as you can see below:", "#### URL\n\n\n* Size of downloaded dataset files: 6.84 GB\n* Size of the generated dataset: 9.34 GB\n* Total amount of disk used: 16.18 GB", "#### URL\n\n\n* Size of downloaded dataset files: 21.60 GB\n* Size of the generated dataset: 21.26 GB\n* Total amount of disk used: 42.86 GB", "#### URL\n\n\n* Size of downloaded dataset files: 5.87 GB\n* Size of the generated dataset: 7.73 GB\n* Total amount of disk used: 13.61 GB", "#### URL\n\n\n* Size of downloaded dataset files: 13.04 MB\n* Size of the generated dataset: 9.57 MB\n* Total amount of disk used: 22.62 MB", "#### URL\n\n\n* Size of downloaded dataset files: 3.69 GB\n* Size of the generated dataset: 4.76 GB\n* Total amount of disk used: 8.45 GB", "#### URL\n\n\n* Size of downloaded dataset files: 251.32 MB\n* Size of the generated dataset: 246.49 MB\n* Total amount of disk used: 497.82 MB", "### Data Fields\n\n\nThe data fields are the same among all configurations:\n\n\n* 'id' ('str'): ID of the article.\n* 'url' ('str'): URL of the article.\n* 'title' ('str'): Title of the article.\n* 'text' ('str'): Text content of the article.", "### Data Splits\n\n\nHere are the number of examples for several configurations:\n\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information\n\n\nMost of Wikipedia's text and many of its images are co-licensed under the\nCreative Commons Attribution-ShareAlike 3.0 Unported License\n(CC BY-SA) and the GNU Free Documentation License\n(GFDL) (unversioned, with no invariant sections, front-cover texts, or back-cover texts).\n\n\nSome text has been imported only under CC BY-SA and CC BY-SA-compatible license and cannot be reused under GFDL; such\ntext will be identified on the page footer, in the page history, or on the discussion page of the article that utilizes\nthe text.", "### Contributions\n\n\nThanks to @lewtun, @mariamabarham, @thomwolf, @lhoestq, @patrickvonplaten for adding this dataset." ]
[ "TAGS\n#task_categories-text-generation #task_categories-fill-mask #task_ids-language-modeling #task_ids-masked-language-modeling #annotations_creators-no-annotation #language_creators-crowdsourced #multilinguality-multilingual #size_categories-n<1K #size_categories-1K<n<10K #size_categories-10K<n<100K #size_categories-100K<n<1M #size_categories-1M<n<10M #source_datasets-original #language-Afar #language-Abkhazian #language-Achinese #language-Afrikaans #language-Akan #language-Tosk Albanian #language-Amharic #language-Aragonese #language-Old English (ca. 450-1100) #language-Arabic #language-Official Aramaic (700-300 BCE) #language-Egyptian Arabic #language-Assamese #language-Asturian #language-Atikamekw #language-Avaric #language-Aymara #language-Azerbaijani #language-South Azerbaijani #language-Bashkir #language-Bavarian #language-Central Bikol #language-Belarusian #language-Bulgarian #language-bh #language-Bislama #language-Banjar #language-Bambara #language-Bengali #language-Tibetan #language-Bishnupriya #language-Breton #language-Bosnian #language-Buginese #language-Russia Buriat #language-Catalan #language-Chavacano #language-Min Dong Chinese #language-Chechen #language-Cebuano #language-Chamorro #language-Choctaw #language-Cherokee #language-Cheyenne #language-Central Kurdish #language-Corsican #language-Cree #language-Crimean Tatar #language-Czech #language-Kashubian #language-Church Slavic #language-Chuvash #language-Welsh #language-Danish #language-German #language-Dinka #language-Dimli (individual language) #language-Lower Sorbian #language-Dotyali #language-Dhivehi #language-Dzongkha #language-Ewe #language-Modern Greek (1453-) #language-Emiliano-Romagnolo #language-English #language-Esperanto #language-Spanish #language-Estonian #language-Basque #language-Extremaduran #language-Persian #language-Fulah #language-Finnish #language-Fijian #language-Faroese #language-French #language-Arpitan #language-Northern Frisian #language-Friulian #language-Western Frisian #language-Irish #language-Gagauz #language-Gan Chinese #language-Scottish Gaelic #language-Galician #language-Gilaki #language-Guarani #language-Goan Konkani #language-Gorontalo #language-Gothic #language-Gujarati #language-Manx #language-Hausa #language-Hakka Chinese #language-Hawaiian #language-Hebrew #language-Hindi #language-Fiji Hindi #language-Hiri Motu #language-Croatian #language-Upper Sorbian #language-Haitian #language-Hungarian #language-Armenian #language-Interlingua (International Auxiliary Language Association) #language-Indonesian #language-Interlingue #language-Igbo #language-Sichuan Yi #language-Inupiaq #language-Iloko #language-Ingush #language-Ido #language-Icelandic #language-Italian #language-Inuktitut #language-Japanese #language-Jamaican Creole English #language-Lojban #language-Javanese #language-Georgian #language-Kara-Kalpak #language-Kabyle #language-Kabardian #language-Kabiyè #language-Kongo #language-Kikuyu #language-Kuanyama #language-Kazakh #language-Kalaallisut #language-Khmer #language-Kannada #language-Korean #language-Komi-Permyak #language-Karachay-Balkar #language-Kashmiri #language-Kölsch #language-Kurdish #language-Komi #language-Cornish #language-Kirghiz #language-Latin #language-Ladino #language-Luxembourgish #language-Lak #language-Lezghian #language-Lingua Franca Nova #language-Ganda #language-Limburgan #language-Ligurian #language-Lombard #language-Lingala #language-Lao #language-Northern Luri #language-Lithuanian #language-Latgalian #language-Latvian #language-Literary Chinese #language-Maithili #language-Moksha #language-Malagasy #language-Marshallese #language-Eastern Mari #language-Maori #language-Minangkabau #language-Macedonian #language-Malayalam #language-Mongolian #language-Marathi #language-Western Mari #language-Malay (macrolanguage) #language-Maltese #language-Creek #language-Mirandese #language-Burmese #language-Erzya #language-Mazanderani #language-Nauru #language-nah #language-Min Nan Chinese #language-Neapolitan #language-Low German #language-Nepali (macrolanguage) #language-Newari #language-Ndonga #language-Dutch #language-Norwegian Nynorsk #language-Norwegian #language-Novial #language-Jèrriais #language-Pedi #language-Navajo #language-Nyanja #language-Occitan (post 1500) #language-Livvi #language-Oromo #language-Oriya (macrolanguage) #language-Ossetian #language-Panjabi #language-Pangasinan #language-Pampanga #language-Papiamento #language-Picard #language-Pennsylvania German #language-Pfaelzisch #language-Pali #language-Pitcairn-Norfolk #language-Polish #language-Piemontese #language-Western Panjabi #language-Pontic #language-Pushto #language-Portuguese #language-Quechua #language-Romansh #language-Vlax Romani #language-Rundi #language-Romanian #language-Russian #language-Rusyn #language-Macedo-Romanian #language-Kinyarwanda #language-Sanskrit #language-Yakut #language-Santali #language-Sardinian #language-Sicilian #language-Scots #language-Sindhi #language-Northern Sami #language-Sango #language-Samogitian #language-Serbo-Croatian #language-Sinhala #language-Slovak #language-Slovenian #language-Samoan #language-Shona #language-Somali #language-Albanian #language-Serbian #language-Sranan Tongo #language-Swati #language-Southern Sotho #language-Saterfriesisch #language-Sundanese #language-Swedish #language-Swahili (macrolanguage) #language-Silesian #language-Tamil #language-Tulu #language-Tetun Dili #language-Telugu #language-Tajik #language-Thai #language-Tigrinya #language-Turkmen #language-Tagalog #language-Tswana #language-Tonga (Tonga Islands) #language-Tok Pisin #language-Turkish #language-Tsonga #language-Tatar #language-Tumbuka #language-Twi #language-Tahitian #language-Tuvinian #language-Udmurt #language-Uighur #language-Ukrainian #language-Urdu #language-Uzbek #language-Venda #language-Venetian #language-Veps #language-Vietnamese #language-Vlaams #language-Volapük #language-Võro #language-Walloon #language-Waray (Philippines) #language-Wolof #language-Wu Chinese #language-Kalmyk #language-Xhosa #language-Mingrelian #language-Yiddish #language-Yoruba #language-Yue Chinese #language-Zhuang #language-Zeeuws #language-Chinese #language-Zulu #license-cc-by-sa-3.0 #license-gfdl #region-us \n", "### Dataset Summary\n\n\nWikipedia dataset containing cleaned articles of all languages.\nThe datasets are built from the Wikipedia dump\n(URL with one split per language. Each example\ncontains the content of one full Wikipedia article with cleaning to strip\nmarkdown and unwanted sections (references, etc.).\n\n\nThe articles are parsed using the ''mwparserfromhell'' tool.\n\n\nTo load this dataset you need to install Apache Beam and ''mwparserfromhell'' first:\n\n\nThen, you can load any subset of Wikipedia per language and per date this way:\n\n\nwhere you can pass as 'beam\\_runner' any Apache Beam supported runner for (distributed) data processing\n(see here).\nPass \"DirectRunner\" to run it on your machine.\n\n\nYou can find the full list of languages and dates here.\n\n\nSome subsets of Wikipedia have already been processed by HuggingFace, and you can load them just with:\n\n\nThe list of pre-processed subsets is:\n\n\n* \"URL\"\n* \"URL\"\n* \"URL\"\n* \"URL\"\n* \"URL\"\n* \"URL\"", "### Supported Tasks and Leaderboards\n\n\nThe dataset is generally used for Language Modeling.", "### Languages\n\n\nYou can find the list of languages here.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nAn example looks as follows:\n\n\nSome subsets of Wikipedia have already been processed by HuggingFace, as you can see below:", "#### URL\n\n\n* Size of downloaded dataset files: 6.84 GB\n* Size of the generated dataset: 9.34 GB\n* Total amount of disk used: 16.18 GB", "#### URL\n\n\n* Size of downloaded dataset files: 21.60 GB\n* Size of the generated dataset: 21.26 GB\n* Total amount of disk used: 42.86 GB", "#### URL\n\n\n* Size of downloaded dataset files: 5.87 GB\n* Size of the generated dataset: 7.73 GB\n* Total amount of disk used: 13.61 GB", "#### URL\n\n\n* Size of downloaded dataset files: 13.04 MB\n* Size of the generated dataset: 9.57 MB\n* Total amount of disk used: 22.62 MB", "#### URL\n\n\n* Size of downloaded dataset files: 3.69 GB\n* Size of the generated dataset: 4.76 GB\n* Total amount of disk used: 8.45 GB", "#### URL\n\n\n* Size of downloaded dataset files: 251.32 MB\n* Size of the generated dataset: 246.49 MB\n* Total amount of disk used: 497.82 MB", "### Data Fields\n\n\nThe data fields are the same among all configurations:\n\n\n* 'id' ('str'): ID of the article.\n* 'url' ('str'): URL of the article.\n* 'title' ('str'): Title of the article.\n* 'text' ('str'): Text content of the article.", "### Data Splits\n\n\nHere are the number of examples for several configurations:\n\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information\n\n\nMost of Wikipedia's text and many of its images are co-licensed under the\nCreative Commons Attribution-ShareAlike 3.0 Unported License\n(CC BY-SA) and the GNU Free Documentation License\n(GFDL) (unversioned, with no invariant sections, front-cover texts, or back-cover texts).\n\n\nSome text has been imported only under CC BY-SA and CC BY-SA-compatible license and cannot be reused under GFDL; such\ntext will be identified on the page footer, in the page history, or on the discussion page of the article that utilizes\nthe text.", "### Contributions\n\n\nThanks to @lewtun, @mariamabarham, @thomwolf, @lhoestq, @patrickvonplaten for adding this dataset." ]
[ 1978, 248, 21, 21, 36, 37, 38, 37, 37, 37, 42, 74, 23, 7, 4, 10, 10, 5, 5, 9, 18, 7, 8, 14, 6, 137, 39 ]
[ "passage: ", "passage: TAGS\n#task_categories-text-generation #task_categories-fill-mask #task_ids-language-modeling #task_ids-masked-language-modeling #annotations_creators-no-annotation #language_creators-crowdsourced #multilinguality-multilingual #size_categories-n<1K #size_categories-1K<n<10K #size_categories-10K<n<100K #size_categories-100K<n<1M #size_categories-1M<n<10M #source_datasets-original #language-Afar #language-Abkhazian #language-Achinese #language-Afrikaans #language-Akan #language-Tosk Albanian #language-Amharic #language-Aragonese #language-Old English (ca. 450-1100) #language-Arabic #language-Official Aramaic (700-300 BCE) #language-Egyptian Arabic #language-Assamese #language-Asturian #language-Atikamekw #language-Avaric #language-Aymara #language-Azerbaijani #language-South Azerbaijani #language-Bashkir #language-Bavarian #language-Central Bikol #language-Belarusian #language-Bulgarian #language-bh #language-Bislama #language-Banjar #language-Bambara #language-Bengali #language-Tibetan #language-Bishnupriya #language-Breton #language-Bosnian #language-Buginese #language-Russia Buriat #language-Catalan #language-Chavacano #language-Min Dong Chinese #language-Chechen #language-Cebuano #language-Chamorro #language-Choctaw #language-Cherokee #language-Cheyenne #language-Central Kurdish #language-Corsican #language-Cree #language-Crimean Tatar #language-Czech #language-Kashubian #language-Church Slavic #language-Chuvash #language-Welsh #language-Danish #language-German #language-Dinka #language-Dimli (individual language) #language-Lower Sorbian #language-Dotyali #language-Dhivehi #language-Dzongkha #language-Ewe #language-Modern Greek (1453-) #language-Emiliano-Romagnolo #language-English #language-Esperanto #language-Spanish #language-Estonian #language-Basque #language-Extremaduran #language-Persian #language-Fulah #language-Finnish #language-Fijian #language-Faroese #language-French #language-Arpitan #language-Northern Frisian #language-Friulian #language-Western Frisian #language-Irish #language-Gagauz #language-Gan Chinese #language-Scottish Gaelic #language-Galician #language-Gilaki #language-Guarani #language-Goan Konkani #language-Gorontalo #language-Gothic #language-Gujarati #language-Manx #language-Hausa #language-Hakka Chinese #language-Hawaiian #language-Hebrew #language-Hindi #language-Fiji Hindi #language-Hiri Motu #language-Croatian #language-Upper Sorbian #language-Haitian #language-Hungarian #language-Armenian #language-Interlingua (International Auxiliary Language Association) #language-Indonesian #language-Interlingue #language-Igbo #language-Sichuan Yi #language-Inupiaq #language-Iloko #language-Ingush #language-Ido #language-Icelandic #language-Italian #language-Inuktitut #language-Japanese #language-Jamaican Creole English #language-Lojban #language-Javanese #language-Georgian #language-Kara-Kalpak #language-Kabyle #language-Kabardian #language-Kabiyè #language-Kongo #language-Kikuyu #language-Kuanyama #language-Kazakh #language-Kalaallisut #language-Khmer #language-Kannada #language-Korean #language-Komi-Permyak #language-Karachay-Balkar #language-Kashmiri #language-Kölsch #language-Kurdish #language-Komi #language-Cornish #language-Kirghiz #language-Latin #language-Ladino #language-Luxembourgish #language-Lak #language-Lezghian #language-Lingua Franca Nova #language-Ganda #language-Limburgan #language-Ligurian #language-Lombard #language-Lingala #language-Lao #language-Northern Luri #language-Lithuanian #language-Latgalian #language-Latvian #language-Literary Chinese #language-Maithili #language-Moksha #language-Malagasy #language-Marshallese #language-Eastern Mari #language-Maori #language-Minangkabau #language-Macedonian #language-Malayalam #language-Mongolian #language-Marathi #language-Western Mari #language-Malay (macrolanguage) #language-Maltese #language-Creek #language-Mirandese #language-Burmese #language-Erzya #language-Mazanderani #language-Nauru #language-nah #language-Min Nan Chinese #language-Neapolitan #language-Low German #language-Nepali (macrolanguage) #language-Newari #language-Ndonga #language-Dutch #language-Norwegian Nynorsk #language-Norwegian #language-Novial #language-Jèrriais #language-Pedi #language-Navajo #language-Nyanja #language-Occitan (post 1500) #language-Livvi #language-Oromo #language-Oriya (macrolanguage) #language-Ossetian #language-Panjabi #language-Pangasinan #language-Pampanga #language-Papiamento #language-Picard #language-Pennsylvania German #language-Pfaelzisch #language-Pali #language-Pitcairn-Norfolk #language-Polish #language-Piemontese #language-Western Panjabi #language-Pontic #language-Pushto #language-Portuguese #language-Quechua #language-Romansh #language-Vlax Romani #language-Rundi #language-Romanian #language-Russian #language-Rusyn #language-Macedo-Romanian #language-Kinyarwanda #language-Sanskrit #language-Yakut #language-Santali #language-Sardinian #language-Sicilian #language-Scots #language-Sindhi #language-Northern Sami #language-Sango #language-Samogitian #language-Serbo-Croatian #language-Sinhala #language-Slovak #language-Slovenian #language-Samoan #language-Shona #language-Somali #language-Albanian #language-Serbian #language-Sranan Tongo #language-Swati #language-Southern Sotho #language-Saterfriesisch #language-Sundanese #language-Swedish #language-Swahili (macrolanguage) #language-Silesian #language-Tamil #language-Tulu #language-Tetun Dili #language-Telugu #language-Tajik #language-Thai #language-Tigrinya #language-Turkmen #language-Tagalog #language-Tswana #language-Tonga (Tonga Islands) #language-Tok Pisin #language-Turkish #language-Tsonga #language-Tatar #language-Tumbuka #language-Twi #language-Tahitian #language-Tuvinian #language-Udmurt #language-Uighur #language-Ukrainian #language-Urdu #language-Uzbek #language-Venda #language-Venetian #language-Veps #language-Vietnamese #language-Vlaams #language-Volapük #language-Võro #language-Walloon #language-Waray (Philippines) #language-Wolof #language-Wu Chinese #language-Kalmyk #language-Xhosa #language-Mingrelian #language-Yiddish #language-Yoruba #language-Yue Chinese #language-Zhuang #language-Zeeuws #language-Chinese #language-Zulu #license-cc-by-sa-3.0 #license-gfdl #region-us \n### Dataset Summary\n\n\nWikipedia dataset containing cleaned articles of all languages.\nThe datasets are built from the Wikipedia dump\n(URL with one split per language. Each example\ncontains the content of one full Wikipedia article with cleaning to strip\nmarkdown and unwanted sections (references, etc.).\n\n\nThe articles are parsed using the ''mwparserfromhell'' tool.\n\n\nTo load this dataset you need to install Apache Beam and ''mwparserfromhell'' first:\n\n\nThen, you can load any subset of Wikipedia per language and per date this way:\n\n\nwhere you can pass as 'beam\\_runner' any Apache Beam supported runner for (distributed) data processing\n(see here).\nPass \"DirectRunner\" to run it on your machine.\n\n\nYou can find the full list of languages and dates here.\n\n\nSome subsets of Wikipedia have already been processed by HuggingFace, and you can load them just with:\n\n\nThe list of pre-processed subsets is:\n\n\n* \"URL\"\n* \"URL\"\n* \"URL\"\n* \"URL\"\n* \"URL\"\n* \"URL\"### Supported Tasks and Leaderboards\n\n\nThe dataset is generally used for Language Modeling.### Languages\n\n\nYou can find the list of languages here.\n\n\nDataset Structure\n-----------------### Data Instances\n\n\nAn example looks as follows:\n\n\nSome subsets of Wikipedia have already been processed by HuggingFace, as you can see below:#### URL\n\n\n* Size of downloaded dataset files: 6.84 GB\n* Size of the generated dataset: 9.34 GB\n* Total amount of disk used: 16.18 GB#### URL\n\n\n* Size of downloaded dataset files: 21.60 GB\n* Size of the generated dataset: 21.26 GB\n* Total amount of disk used: 42.86 GB#### URL\n\n\n* Size of downloaded dataset files: 5.87 GB\n* Size of the generated dataset: 7.73 GB\n* Total amount of disk used: 13.61 GB#### URL\n\n\n* Size of downloaded dataset files: 13.04 MB\n* Size of the generated dataset: 9.57 MB\n* Total amount of disk used: 22.62 MB" ]
0fb976b53258bc27163f1b94f32c59ea13253d27
# Dataset Card for "uzh-hs23-etsp-eval-single-base-bar" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
hk-kaden-kim/uzh-hs23-etsp-eval-single-base-bar
[ "region:us" ]
2023-10-08T09:45:41+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "caption", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 5223052.0, "num_examples": 100}], "download_size": 5179034, "dataset_size": 5223052.0}}
2023-10-08T09:52:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "uzh-hs23-etsp-eval-single-base-bar" More Information needed
[ "# Dataset Card for \"uzh-hs23-etsp-eval-single-base-bar\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"uzh-hs23-etsp-eval-single-base-bar\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"uzh-hs23-etsp-eval-single-base-bar\"\n\nMore Information needed" ]
ad65713e4bd5ac2bcce62c1b93513e86152c0a97
# Dataset Card for "uzh-hs23-etsp-eval-single-base-line" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
hk-kaden-kim/uzh-hs23-etsp-eval-single-base-line
[ "region:us" ]
2023-10-08T09:45:49+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "caption", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 4026307.0, "num_examples": 100}], "download_size": 4011375, "dataset_size": 4026307.0}}
2023-10-08T09:53:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for "uzh-hs23-etsp-eval-single-base-line" More Information needed
[ "# Dataset Card for \"uzh-hs23-etsp-eval-single-base-line\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"uzh-hs23-etsp-eval-single-base-line\"\n\nMore Information needed" ]
[ 6, 29 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"uzh-hs23-etsp-eval-single-base-line\"\n\nMore Information needed" ]