| Column | Type | Min length | Max length |
|---|---|---|---|
| sha | string | 40 | 40 |
| text | string | 0 | 13.4M |
| id | string | 2 | 117 |
| tags | list | | |
| created_at | string | 25 | 25 |
| metadata | string | 2 | 31.7M |
| last_modified | string | 25 | 25 |
66a8056b617eaed85e83fe96b678b7219229ff03
# Dataset Card for "eclassCorpus" This Dataset consists of names and descriptions from ECLASS-standard pump-properties. It can be used to evaluate models on the task of matching paraphrases to the ECLASS-standard pump-properties based on their semantics.
JoBeer/eclassCorpus
[ "region:us" ]
2022-11-05T11:10:39+00:00
{"dataset_info": {"features": [{"name": "did", "dtype": "int64"}, {"name": "query", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "datatype", "dtype": "string"}, {"name": "unit", "dtype": "string"}, {"name": "IRDI", "dtype": "string"}, {"name": "metalabel", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 137123, "num_examples": 672}], "download_size": 48203, "dataset_size": 137123}}
2023-01-07T12:35:44+00:00
c5883bfc76c2bc55ea74fede5d7b5271424b0e32
# Dataset Card for "eclassQuery" This Dataset consists of paraphrases of ECLASS-standard pump-properties. It can be used to evaluate models on the task of matching these paraphrases to the actual ECLASS-standard pump-properties based on their semantics.
JoBeer/eclassQuery
[ "task_categories:sentence-similarity", "size_categories:1K<n<10K", "language:en", "region:us" ]
2022-11-05T11:14:01+00:00
{"language": ["en"], "size_categories": ["1K<n<10K"], "task_categories": ["sentence-similarity"], "dataset_info": {"features": [{"name": "did", "dtype": "int64"}, {"name": "query", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "duplicate_id", "dtype": "int64"}, {"name": "metalabel", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 147176, "num_examples": 1040}, {"name": "eval", "num_bytes": 100846, "num_examples": 671}], "download_size": 113268, "dataset_size": 248022}}
2023-01-07T12:34:03+00:00
b0f8f64e6d681f84caa925de86b77e2a61f47903
# Dataset Card for "farsidecomics-blip-captions" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
maderix/farsidecomics-blip-captions
[ "region:us" ]
2022-11-05T11:29:45+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 37767218.0, "num_examples": 354}], "download_size": 37175120, "dataset_size": 37767218.0}}
2022-11-05T11:29:49+00:00
0e804efcc3d6ef4934e925e9ffc7d73f8d33f194
# Dataset Card for "diffusiondb_random_10k_zh_v1" svjack/diffusiondb_random_10k_zh_v1 is a dataset that random sample 10k English samples from [diffusiondb](https://github.com/poloclub/diffusiondb) and use [NMT](https://en.wikipedia.org/wiki/Neural_machine_translation) translate them into Chinese with some corrections.<br/> it used to train stable diffusion models in <br/> [svjack/Stable-Diffusion-FineTuned-zh-v0](https://huggingface.co/svjack/Stable-Diffusion-FineTuned-zh-v0)<br/> [svjack/Stable-Diffusion-FineTuned-zh-v1](https://huggingface.co/svjack/Stable-Diffusion-FineTuned-zh-v1)<br/> [svjack/Stable-Diffusion-FineTuned-zh-v2](https://huggingface.co/svjack/Stable-Diffusion-FineTuned-zh-v2)<br/> And is the data support of [https://github.com/svjack/Stable-Diffusion-Chinese-Extend](https://github.com/svjack/Stable-Diffusion-Chinese-Extend) which is a fine tune version of Stable Diffusion model on self-translate 10k diffusiondb Chinese Corpus and "extend" it. [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
svjack/diffusiondb_random_10k_zh_v1
[ "annotations_creators:machine-generated", "language_creators:other", "multilinguality:multilingual", "size_categories:10K", "language:en", "language:zh", "region:us" ]
2022-11-05T12:02:32+00:00
{"annotations_creators": ["machine-generated"], "language_creators": ["other"], "language": ["en", "zh"], "multilinguality": ["multilingual"], "size_categories": ["10K"], "pretty_name": "Pok\u00e9mon BLIP captions", "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "prompt", "dtype": "string"}, {"name": "seed", "dtype": "int64"}, {"name": "step", "dtype": "int64"}, {"name": "cfg", "dtype": "float32"}, {"name": "sampler", "dtype": "string"}, {"name": "zh_prompt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 5826763233.4353, "num_examples": 9841}], "download_size": 5829710525, "dataset_size": 5826763233.4353}}
2022-11-08T04:08:23+00:00
8394ef7a7ccc5b2028f473be68097fc853febed0
KheemDH/data
[ "task_categories:text-classification", "task_ids:sentiment-analysis", "annotations_creators:other", "language_creators:other", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "language:en", "license:other", "region:us" ]
2022-11-05T13:35:32+00:00
{"annotations_creators": ["other"], "language_creators": ["other"], "language": ["en"], "license": ["other"], "multilinguality": ["monolingual"], "size_categories": ["10K<n<100K"], "source_datasets": ["original"], "task_categories": ["text-classification"], "task_ids": ["sentiment-analysis"], "pretty_name": "data", "tags": []}
2022-11-05T14:28:14+00:00
ced75dce72ba1810bd050272470b07b1db519ebc
# Dataset Card for "gal_yair_new" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
galman33/gal_yair_8300_1664x832
[ "region:us" ]
2022-11-05T14:04:29+00:00
{"dataset_info": {"features": [{"name": "lat", "dtype": "float64"}, {"name": "lon", "dtype": "float64"}, {"name": "country_code", "dtype": "string"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 1502268207.4, "num_examples": 8300}], "download_size": 1410808567, "dataset_size": 1502268207.4}}
2022-11-05T14:54:09+00:00
7c31b58d1155a7597a693edaccb7fef7605a9b60
MJTidmarsh/Goon3_Test
[ "license:afl-3.0", "region:us" ]
2022-11-05T14:07:18+00:00
{"license": "afl-3.0"}
2022-11-05T14:16:55+00:00
c945b082ca08d0a8f3ba227fb78404a09614c36e
# Dataset Card for "counterfact-tracing" This is adapted from the counterfact dataset from the excellent [ROME paper](https://rome.baulab.info/) from David Bau and Kevin Meng. This is a dataset of 21919 factual relations, formatted as `data["prompt"]==f"{data['relation_prefix']}{data['subject']}{data['relation_suffix']}"`. Each has two responses `data["target_true"]` and `data["target_false"]` which is intended to go immediately after the prompt. The dataset was originally designed for memory editing in models. I made this for a research project doing mechanistic interpretability of how models recall factual knowledge, building on their causal tracing technique, and so stripped their data down to the information relevant to causal tracing. I also prepended spaces where relevant so that the subject and targets can be properly tokenized as is (spaces are always prepended to targets, and are prepended to subjects unless the subject is at the start of a sentence). Each fact has both a true and false target. I recommend measuring the logit *difference* between the true and false target (at least, if it's a single token target!), so as to control for eg the parts of the model which identify that it's supposed to be giving a fact of this type at all. (Idea inspired by the excellent [Interpretability In the Wild](https://arxiv.org/abs/2211.00593) paper).
NeelNanda/counterfact-tracing
[ "arxiv:2211.00593", "region:us" ]
2022-11-05T15:09:51+00:00
{"dataset_info": {"features": [{"name": "relation", "dtype": "string"}, {"name": "relation_prefix", "dtype": "string"}, {"name": "relation_suffix", "dtype": "string"}, {"name": "prompt", "dtype": "string"}, {"name": "relation_id", "dtype": "string"}, {"name": "target_false_id", "dtype": "string"}, {"name": "target_true_id", "dtype": "string"}, {"name": "target_true", "dtype": "string"}, {"name": "target_false", "dtype": "string"}, {"name": "subject", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3400668, "num_examples": 21919}], "download_size": 1109314, "dataset_size": 3400668}}
2022-11-05T15:19:43+00:00
9ce26cfd13b8a40a09229eb582d654bf774c11cb
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Natural Language Inference * Model: w11wo/indonesian-roberta-base-indonli * Dataset: indonli * Config: indonli * Split: test_expert To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@afaji](https://huggingface.co/afaji) for evaluating this model.
autoevaluate/autoeval-eval-indonli-indonli-717ea6-1995866375
[ "autotrain", "evaluation", "region:us" ]
2022-11-05T18:25:54+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["indonli"], "eval_info": {"task": "natural_language_inference", "model": "w11wo/indonesian-roberta-base-indonli", "metrics": [], "dataset_name": "indonli", "dataset_config": "indonli", "dataset_split": "test_expert", "col_mapping": {"text1": "premise", "text2": "hypothesis", "target": "label"}}}
2022-11-05T18:26:33+00:00
423016fea124ff2ab30f5d8d3a6f19bb3d27e0a6
LiveEvil/ImRealSrry
[ "license:bigscience-openrail-m", "region:us" ]
2022-11-05T18:29:40+00:00
{"license": "bigscience-openrail-m"}
2022-11-05T18:29:40+00:00
357bc4f6af754b70dfbb6ced6f48e9728baa8e0d
# Dataset Card for BIOSSES ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://tabilab.cmpe.boun.edu.tr/BIOSSES/DataSet.html - **Repository:** https://github.com/gizemsogancioglu/biosses - **Paper:** [BIOSSES: a semantic sentence similarity estimation system for the biomedical domain](https://academic.oup.com/bioinformatics/article/33/14/i49/3953954) - **Point of Contact:** [Gizem Soğancıoğlu]([email protected]) and [Arzucan Özgür]([email protected]) ### Dataset Summary BIOSSES is a benchmark dataset for biomedical sentence similarity estimation. The dataset comprises 100 sentence pairs, in which each sentence was selected from the [TAC (Text Analysis Conference) Biomedical Summarization Track Training Dataset](https://tac.nist.gov/2014/BiomedSumm/) containing articles from the biomedical domain. The sentence pairs in BIOSSES were selected from citing sentences, i.e. sentences that have a citation to a reference article. The sentence pairs were evaluated by five different human experts that judged their similarity and gave scores ranging from 0 (no relation) to 4 (equivalent). In the original paper the mean of the scores assigned by the five human annotators was taken as the gold standard. The Pearson correlation between the gold standard scores and the scores estimated by the models was used as the evaluation metric. The strength of correlation can be assessed by the general guideline proposed by Evans (1996) as follows: - very strong: 0.80–1.00 - strong: 0.60–0.79 - moderate: 0.40–0.59 - weak: 0.20–0.39 - very weak: 0.00–0.19 ### Data Splits (From BLUE Benchmark) |name|Train|Dev|Test| |:--:|:--:|:--:|:--:| |biosses|64|16|20| ### Supported Tasks and Leaderboards Biomedical Semantic Similarity Scoring. ### Languages English. ## Dataset Structure ### Data Instances For each instance, there are two sentences (i.e. sentence 1 and 2), and its corresponding similarity score (the mean of the scores assigned by the five human annotators). 
```json { "id": "0", "sentence1": "Centrosomes increase both in size and in microtubule-nucleating capacity just before mitotic entry.", "sentence2": "Functional studies showed that, when introduced into cell lines, miR-146a was found to promote cell proliferation in cervical cancer cells, which suggests that miR-146a works as an oncogenic miRNA in these cancers.", "score": 0.0 } ``` ### Data Fields - `sentence 1`: string - `sentence 2`: string - `score`: float ranging from 0 (no relation) to 4 (equivalent) ## Dataset Creation ### Curation Rationale ### Source Data The [TAC (Text Analysis Conference) Biomedical Summarization Track Training Dataset](https://tac.nist.gov/2014/BiomedSumm/). ### Annotations #### Annotation process The sentence pairs were evaluated by five different human experts that judged their similarity and gave scores ranging from 0 (no relation) to 4 (equivalent). The score range was described based on the guidelines of SemEval 2012 Task 6 on STS (Agirre et al., 2012). Besides the annotation instructions, example sentences from the biomedical literature were provided to the annotators for each of the similarity degrees. The table below shows the Pearson correlation of the scores of each annotator with respect to the average scores of the remaining four annotators. It is observed that there is strong association among the scores of the annotators. The lowest correlations are 0.902, which can be considered as an upper bound for an algorithmic measure evaluated on this dataset. | |Correlation r | |----------:|--------------:| |Annotator A| 0.952| |Annotator B| 0.958| |Annotator C| 0.917| |Annotator D| 0.902| |Annotator E| 0.941| ## Additional Information ### Dataset Curators - Gizem Soğancıoğlu, [email protected] - Hakime Öztürk, [email protected] - Arzucan Özgür, [email protected] Bogazici University, Istanbul, Turkey ### Licensing Information BIOSSES is made available under the terms of [The GNU Common Public License v.3.0](https://www.gnu.org/licenses/gpl-3.0.en.html). ### Citation Information ```bibtex @article{10.1093/bioinformatics/btx238, author = {Soğancıoğlu, Gizem and Öztürk, Hakime and Özgür, Arzucan}, title = "{BIOSSES: a semantic sentence similarity estimation system for the biomedical domain}", journal = {Bioinformatics}, volume = {33}, number = {14}, pages = {i49-i58}, year = {2017}, month = {07}, abstract = "{The amount of information available in textual format is rapidly increasing in the biomedical domain. Therefore, natural language processing (NLP) applications are becoming increasingly important to facilitate the retrieval and analysis of these data. Computing the semantic similarity between sentences is an important component in many NLP tasks including text retrieval and summarization. A number of approaches have been proposed for semantic sentence similarity estimation for generic English. However, our experiments showed that such approaches do not effectively cover biomedical knowledge and produce poor results for biomedical text.We propose several approaches for sentence-level semantic similarity computation in the biomedical domain, including string similarity measures and measures based on the distributed vector representations of sentences learned in an unsupervised manner from a large biomedical corpus. In addition, ontology-based approaches are presented that utilize general and domain-specific ontologies. Finally, a supervised regression based model is developed that effectively combines the different similarity computation metrics. 
A benchmark data set consisting of 100 sentence pairs from the biomedical literature is manually annotated by five human experts and used for evaluating the proposed methods.The experiments showed that the supervised semantic sentence similarity computation approach obtained the best performance (0.836 correlation with gold standard human annotations) and improved over the state-of-the-art domain-independent systems up to 42.6\\% in terms of the Pearson correlation metric.A web-based system for biomedical semantic sentence similarity computation, the source code, and the annotated benchmark data set are available at: http://tabilab.cmpe.boun.edu.tr/BIOSSES/.}", issn = {1367-4803}, doi = {10.1093/bioinformatics/btx238}, url = {https://doi.org/10.1093/bioinformatics/btx238}, eprint = {https://academic.oup.com/bioinformatics/article-pdf/33/14/i49/25157316/btx238.pdf}, } ``` ### Contributions Thanks to [@qanastek](https://github.com/qanastek) for adding this dataset.
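A small sketch of the evaluation protocol described above: score each sentence pair (here with a crude word-overlap baseline, purely as a placeholder for a real model) and report the Pearson correlation against the gold mean annotator scores. The dataset id and split follow this card; depending on the loader, a config name may also be required.

```python
# Sketch: Pearson correlation between predicted and gold similarity scores.
from datasets import load_dataset
from scipy.stats import pearsonr

ds = load_dataset("qanastek/Biosses-BLUE", split="train")

def word_overlap(s1, s2):
    # Crude Jaccard word overlap as a stand-in for a real similarity model.
    a, b = set(s1.lower().split()), set(s2.lower().split())
    return len(a & b) / len(a | b)

predictions = [word_overlap(x["sentence1"], x["sentence2"]) for x in ds]
r, _ = pearsonr(ds["score"], predictions)
print(f"Pearson r = {r:.3f}")
```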
qanastek/Biosses-BLUE
[ "task_categories:text-classification", "task_ids:text-scoring", "task_ids:semantic-similarity-scoring", "annotations_creators:expert-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:n<1K", "source_datasets:original", "language:en", "license:gpl-3.0", "region:us" ]
2022-11-05T19:27:31+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["found"], "language": ["en"], "license": ["gpl-3.0"], "multilinguality": ["monolingual"], "size_categories": ["n<1K"], "source_datasets": ["original"], "task_categories": ["text-classification"], "task_ids": ["text-scoring", "semantic-similarity-scoring"], "paperswithcode_id": "biosses", "pretty_name": "BIOSSES", "dataset_info": {"features": [{"name": "sentence1", "dtype": "string"}, {"name": "sentence2", "dtype": "string"}, {"name": "score", "dtype": "float32"}], "splits": [{"name": "train", "num_bytes": 32783, "num_examples": 100}], "download_size": 36324, "dataset_size": 32783}}
2022-11-05T23:23:58+00:00
e9bad8693d5b42ddab7e1c15f2b5524680c5efb2
`duality_style, art by duality_style` this will give a monochrome, wings/feathers, flowers, and opposite-reflection look. License: This embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies: You can't use the embedding to deliberately produce or share illegal or harmful outputs or content. The authors claim no rights on the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in the license. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M with all your users (please read the license entirely and carefully). Please read the full license here.
flamesbob/Duality_style
[ "license:creativeml-openrail-m", "region:us" ]
2022-11-05T20:34:29+00:00
{"license": "creativeml-openrail-m"}
2022-11-05T20:36:53+00:00
629e8a3f87e9ef4a9ec7d157e2946951c17983b2
ansondotdesign/roku
[ "license:afl-3.0", "region:us" ]
2022-11-05T20:54:07+00:00
{"license": "afl-3.0"}
2022-11-05T21:00:09+00:00
545e82b4d2819a24aae1ff54048ecf98b7b28231
# Dataset Card for "ade20k-panoptic-demo" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
nielsr/ade20k-panoptic-demo
[ "region:us" ]
2022-11-05T21:16:00+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "image"}, {"name": "segments_info", "list": [{"name": "area", "dtype": "int64"}, {"name": "bbox", "sequence": "int64"}, {"name": "category_id", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "iscrowd", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 492746.0, "num_examples": 10}, {"name": "validation", "num_bytes": 461402.0, "num_examples": 10}], "download_size": 949392, "dataset_size": 954148.0}}
2022-11-06T17:13:22+00:00
8bdd59805ec01cc3920d42a7633083e4dea28265
# Lands Between Elden Ring Embedding / Textual Inversion ## Usage To use this embedding you have to download the file as well as drop it into the "\stable-diffusion-webui\embeddings" folder Two different Versions: ### Version 1: File: ```lands_between``` To use it in a prompt: ```"art by lands_between"``` For best use write something like ```highly detailed background art by lands_between``` ### Version 2: File: ```elden_ring``` To use it in a prompt: ```"art by elden_ring"``` For best use write something like ```highly detailed background art by elden_ring``` If it is too strong, just add [] around it. Trained until 7000 steps Have fun :) ## Example Pictures <table> <tr> <td><img src=https://i.imgur.com/Pajrsvy.png width=100% height=100%/></td> <td><img src=https://i.imgur.com/Bly3NJi.png width=100% height=100%/></td> <td><img src=https://i.imgur.com/IxLNgB6.png width=100% height=100%/></td> </tr> <tr> <td><img src=https://i.imgur.com/6rJ5ppD.png width=100% height=100%/></td> <td><img src=https://i.imgur.com/ueTEHtb.png width=100% height=100%/></td> <td><img src=https://i.imgur.com/dlVIwXs.png width=100% height=100%/></td> </tr> </table> ## License This embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies: 1. You can't use the embedding to deliberately produce or share illegal or harmful outputs or content 2. The authors claim no rights on the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in the license 3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M with all your users (please read the license entirely and carefully) [Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
Nerfgun3/Elden_Ring_Embeddings
[ "language:en", "license:creativeml-openrail-m", "stable-diffusion", "text-to-image", "region:us" ]
2022-11-05T21:27:46+00:00
{"language": ["en"], "license": "creativeml-openrail-m", "tags": ["stable-diffusion", "text-to-image"], "inference": false}
2022-11-12T15:02:39+00:00
b1743a3eb280777e999ff98f0c9f00361b4042b2
# Dataset Card for "gal_yair_large" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
galman33/gal_yair_83000_1664x832
[ "region:us" ]
2022-11-05T21:36:49+00:00
{"dataset_info": {"features": [{"name": "lat", "dtype": "float64"}, {"name": "lon", "dtype": "float64"}, {"name": "country_code", "dtype": "string"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 12963511218.0, "num_examples": 83000}], "download_size": 14150729267, "dataset_size": 12963511218.0}}
2022-11-07T16:16:17+00:00
7603c0da12be1c4f630020fe27db2d972a5793f1
LiveEvil/Im
[ "license:openrail", "region:us" ]
2022-11-05T23:32:13+00:00
{"license": "openrail"}
2022-11-10T17:20:25+00:00
c0179e1d7304760d33b8fe4985288ea6d025eea2
# Dataset Card for "adj-n0ed8tdx-800-150-3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ebeaulac/adj-n0ed8tdx-800-150-3
[ "region:us" ]
2022-11-05T23:38:03+00:00
{"dataset_info": {"features": [{"name": "matrix", "sequence": {"sequence": "float64"}}, {"name": "is_adjacent", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 55909792, "num_examples": 1600}, {"name": "valid", "num_bytes": 10444854, "num_examples": 300}], "download_size": 48159452, "dataset_size": 66354646}}
2022-11-05T23:38:13+00:00
be5ccd50c1a5b6a629bfeead07d335977b77096a
# Dataset Card for "adj-n0ed8tdx-800-150-10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ebeaulac/adj-n0ed8tdx-800-150-10
[ "region:us" ]
2022-11-06T00:08:53+00:00
{"dataset_info": {"features": [{"name": "matrix", "sequence": {"sequence": "float64"}}, {"name": "is_adjacent", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 5311464, "num_examples": 1600}, {"name": "valid", "num_bytes": 993502, "num_examples": 300}], "download_size": 4985370, "dataset_size": 6304966}}
2022-11-06T00:09:01+00:00
9c5218da824ae86eab971b9f29cff7c813175c3e
Justbenoit/stable-test
[ "license:other", "region:us" ]
2022-11-06T01:08:57+00:00
{"license": "other"}
2022-11-06T01:08:57+00:00
decfdcc57efa83466449ccaa658ad431a8a416d4
# Dataset Summary 20M Vietnamese PubMed biomedical abstracts translated by the [state-of-the-art English-Vietnamese Translation project](https://arxiv.org/abs/2210.05610). The data has been used as unlabeled dataset for [pretraining a Vietnamese Biomedical-domain Transformer model](https://arxiv.org/abs/2210.05598). ![image](https://user-images.githubusercontent.com/44376091/200204462-4d559113-5bdf-4cc5-9e88-70abe82babba.png) image source: [Enriching Biomedical Knowledge for Vietnamese Low-resource Language Through Large-Scale Translation](https://arxiv.org/abs/2210.05598) # Language - English: Original biomedical abstracts from [Pubmed](https://www.nlm.nih.gov/databases/download/pubmed_medline_faq.html) - Vietnamese: Synthetic abstract translated by a [state-of-the-art English-Vietnamese Translation project](https://arxiv.org/abs/2210.05610) # Dataset Structure - The English sequences are - The Vietnamese sequences are # Source Data - Initial Data Collection and Normalization https://www.nlm.nih.gov/databases/download/pubmed_medline_faq.html # Licensing Information [Courtesy of the U.S. National Library of Medicine.](https://www.nlm.nih.gov/databases/download/terms_and_conditions.html) # Citation ``` @misc{mtet, doi = {10.48550/ARXIV.2210.05610}, url = {https://arxiv.org/abs/2210.05610}, author = {Ngo, Chinh and Trinh, Trieu H. and Phan, Long and Tran, Hieu and Dang, Tai and Nguyen, Hieu and Nguyen, Minh and Luong, Minh-Thang}, keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {MTet: Multi-domain Translation for English and Vietnamese}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ``` ``` @misc{vipubmed, doi = {10.48550/ARXIV.2210.05598}, url = {https://arxiv.org/abs/2210.05598}, author = {Phan, Long and Dang, Tai and Tran, Hieu and Phan, Vy and Chau, Lam D. and Trinh, Trieu H.}, keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Enriching Biomedical Knowledge for Vietnamese Low-resource Language Through Large-Scale Translation}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
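For a quick look at the parallel abstracts (or for pretraining-style consumption), streaming avoids the roughly 23 GB eager download; the `pubmed22` split name and the `en`/`vi` columns follow the dataset's feature list.

```python
# Sketch: stream a couple of English/Vietnamese abstract pairs.
from datasets import load_dataset

ds = load_dataset("VietAI/vi_pubmed", split="pubmed22", streaming=True)

for pair in ds.take(2):
    print(pair["en"][:120])
    print(pair["vi"][:120])
```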
VietAI/vi_pubmed
[ "task_categories:text-generation", "task_categories:fill-mask", "task_ids:language-modeling", "task_ids:masked-language-modeling", "language:vi", "language:en", "license:cc", "arxiv:2210.05610", "arxiv:2210.05598", "region:us" ]
2022-11-06T01:36:50+00:00
{"language": ["vi", "en"], "license": "cc", "task_categories": ["text-generation", "fill-mask"], "task_ids": ["language-modeling", "masked-language-modeling"], "paperswithcode_id": "pubmed", "dataset_info": {"features": [{"name": "en", "dtype": "string"}, {"name": "vi", "dtype": "string"}], "splits": [{"name": "pubmed22", "num_bytes": 44360028980, "num_examples": 20087006}], "download_size": 23041004247, "dataset_size": 44360028980}}
2024-01-09T10:03:00+00:00
2dc0655925b2c848b6c86b68ba6ebad82bfec491
# Dataset Card for PubMed ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://www.nlm.nih.gov/databases/download/pubmed_medline.html - **Documentation:** https://www.nlm.nih.gov/databases/download/pubmed_medline_documentation.html - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary NLM produces a baseline set of MEDLINE/PubMed citation records in XML format for download on an annual basis. The annual baseline is released in December of each year. Each day, NLM produces update files that include new, revised and deleted citations. See our documentation page for more information. ### Supported Tasks and Leaderboards [More Information Needed] ### Languages - English ## Dataset Structure Bear in mind that the data comes from XML files with various tags that are hard to reflect in a concise JSON format. Tags and lists do not map "naturally" onto JSON, so this library had to make some choices about how to represent the data. "Journal" info was dropped altogether, as it would have led to many fields being empty all the time. The hierarchy is also a bit unnatural, but the choice was made to stay as close as possible to the original data in case future releases change the schema on NLM's side. The author field has been kept and contains either "ForeName", "LastName", "Initials", or "CollectiveName".
(All fields are always present, but only some will be filled.) ### Data Instances ```json { "MedlineCitation": { "PMID": 0, "DateCompleted": {"Year": 0, "Month": 0, "Day": 0}, "NumberOfReferences": 0, "DateRevised": {"Year": 0, "Month": 0, "Day": 0}, "Article": { "Abstract": {"AbstractText": "Some abstract (can be missing)"}, "ArticleTitle": "Article title", "AuthorList": {"Author": [ {"FirstName": "John", "ForeName": "Doe", "Initials": "JD", "CollectiveName": ""}, {"CollectiveName": "The Manhattan Project", "FirstName": "", "ForeName": "", "Initials": ""} ]}, "Language": "en", "GrantList": {"Grant": []}, "PublicationTypeList": {"PublicationType": []} }, "MedlineJournalInfo": {"Country": "France"}, "ChemicalList": {"Chemical": [{"RegistryNumber": "XX", "NameOfSubstance": "Methanol"}]}, "CitationSubset": "AIM", "MeshHeadingList": {"MeshHeading": []} }, "PubmedData": { "ArticleIdList": {"ArticleId": "10.1002/bjs.1800650203"}, "PublicationStatus": "ppublish", "History": {"PubMedPubDate": [{"Year": 0, "Month": 0, "Day": 0}]}, "ReferenceList": [{"Citation": "Somejournal", "CitationId": 1}] } } ``` ### Data Fields The main fields that will probably interest people are: - "MedlineCitation" > "Article" > "AuthorList" > "Author" - "MedlineCitation" > "Article" > "Abstract" > "AbstractText" - "MedlineCitation" > "Article" > "ArticleTitle" - "MedlineCitation" > "ChemicalList" > "Chemical" - "MedlineCitation" > "NumberOfReferences" ### Data Splits There are no splits in this dataset. It is given as is. ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization https://www.nlm.nih.gov/databases/download/pubmed_medline_faq.html #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information https://www.nlm.nih.gov/databases/download/terms_and_conditions.html ### Citation Information [Courtesy of the U.S. National Library of Medicine](https://www.nlm.nih.gov/databases/download/terms_and_conditions.html). ### Contributions Thanks to [@Narsil](https://github.com/Narsil) for adding this dataset.
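A small sketch of walking the nested record layout shown above, using a plain dict of the same shape rather than any particular loader; the author handling follows the field names described in the card and is illustrative only.

```python
# Sketch: navigate a record with the shape documented above.
record = {
    "MedlineCitation": {
        "Article": {
            "Abstract": {"AbstractText": "Some abstract (can be missing)"},
            "ArticleTitle": "Article title",
            "AuthorList": {"Author": [
                {"ForeName": "John", "LastName": "Doe", "Initials": "JD", "CollectiveName": ""},
                {"ForeName": "", "LastName": "", "Initials": "", "CollectiveName": "The Manhattan Project"},
            ]},
        },
    },
}

article = record["MedlineCitation"]["Article"]
print(article["ArticleTitle"])
print(article["Abstract"]["AbstractText"])

# An author carries either individual name parts or a CollectiveName; empty
# strings mark the fields that do not apply.
for author in article["AuthorList"]["Author"]:
    print(author["CollectiveName"] or f'{author["ForeName"]} {author["LastName"]}')
```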
justinphan3110/vi_pubmed
[ "task_categories:text-generation", "task_categories:fill-mask", "task_categories:text-classification", "task_ids:language-modeling", "task_ids:masked-language-modeling", "task_ids:text-scoring", "task_ids:topic-classification", "annotations_creators:crowdsourced", "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:10M<n<100M", "source_datasets:original", "language:en", "license:other", "region:us" ]
2022-11-06T01:39:06+00:00
{"annotations_creators": ["crowdsourced"], "language_creators": ["crowdsourced"], "language": ["en"], "license": ["other"], "multilinguality": ["monolingual"], "size_categories": ["10M<n<100M"], "source_datasets": ["original"], "task_categories": ["text-generation", "fill-mask", "text-classification"], "task_ids": ["language-modeling", "masked-language-modeling", "text-scoring", "topic-classification"], "paperswithcode_id": "pubmed", "pretty_name": "ViPubMed", "split": ["en", "vi"]}
2022-11-06T21:02:17+00:00
ed7cc1bbeea46791a75ece509a414c12fd264167
# Hashtag Prediction Dataset from paper TwHIN-BERT: A Socially-Enriched Pre-trained Language Model for Multilingual Tweet Representations [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-green.svg?style=flat-square)](https://huggingface.co/datasets/Twitter/HashtagPrediction/discussions) [![arXiv](https://img.shields.io/badge/arXiv-2203.15827-b31b1b.svg)](https://arxiv.org/abs/2209.07562) [![Github](https://img.shields.io/badge/Github-TwHIN--BERT-brightgreen?logo=github)](https://github.com/xinyangz/TwHIN-BERT) This repo contains the Hashtag prediction dataset from our paper [TwHIN-BERT: A Socially-Enriched Pre-trained Language Model for Multilingual Tweet Representations](https://arxiv.org/abs/2209.07562). <br /> [[arXiv]](https://arxiv.org/abs/2209.07562) [[HuggingFace Models]](https://huggingface.co/Twitter/twhin-bert-base) [[Github repo]](https://github.com/xinyangz/TwHIN-BERT) <a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</a>. ## Download Use the `hashtag-classification-id.zip` in this repo. [Link](https://huggingface.co/datasets/Twitter/HashtagPrediction/blob/main/hashtag-classification-id.zip). Check the first-author's GitHub repo for any supplemental dataset material or code. [Link](https://github.com/xinyangz/TwHIN-BERT) ## Dataset Description The hashtag prediction dataset is a multilingual classification dataset. Separate datasets are given for different languages. We first select 500 (or all available) popular hashtags of each language and then sample 10k (or all available) popular Tweets that contain these hashtags. We make sure each Tweet will have exactly one of the selected hashtags. The evaluation task is a multiclass classification task, with hashtags as labels. We remove the hashtag from the Tweet, and let the model predict the removed hashtag. We provide Tweet ID and raw text hashtag labels in `tsv` files. For each language, we provide train, development, and test splits. To use the dataset, you must hydrate the Tweet text with [Twitter API](https://developer.twitter.com/en/docs/twitter-api), and **remove the hashtag used for label from each Tweet** . The data format is displayed below. | ID | label | | ------------- | ------------- | | 1 | hashtag | | 2 | another hashtag | ## Citation If you use our dataset in your work, please cite the following: ```bib @article{zhang2022twhin, title={TwHIN-BERT: A Socially-Enriched Pre-trained Language Model for Multilingual Tweet Representations}, author={Zhang, Xinyang and Malkov, Yury and Florez, Omar and Park, Serim and McWilliams, Brian and Han, Jiawei and El-Kishky, Ahmed}, journal={arXiv preprint arXiv:2209.07562}, year={2022} } ```
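A rough sketch of the preprocessing described above: read a split file, hydrate each Tweet ID via the Twitter API, and strip the hashtag that serves as the label before classification. The file name, column layout, and the `hydrate_tweet` helper are hypothetical placeholders, and the label is assumed to be stored without a leading `#`.

```python
# Hypothetical preprocessing sketch for the hashtag prediction task.
import csv
import re

def hydrate_tweet(tweet_id: str) -> str:
    """Placeholder: fetch the Tweet text for this ID via the Twitter API."""
    raise NotImplementedError

def remove_label_hashtag(text: str, hashtag: str) -> str:
    # Drop the labelled hashtag (case-insensitive) and tidy up whitespace.
    cleaned = re.sub(rf"#\s*{re.escape(hashtag)}\b", "", text, flags=re.IGNORECASE)
    return " ".join(cleaned.split())

examples = []
with open("en/train.tsv", newline="", encoding="utf-8") as f:  # hypothetical path
    for tweet_id, hashtag in csv.reader(f, delimiter="\t"):
        text = hydrate_tweet(tweet_id)
        examples.append({"text": remove_label_hashtag(text, hashtag), "label": hashtag})
```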
Twitter/HashtagPrediction
[ "language:sl", "language:ur", "language:sd", "language:pl", "language:vi", "language:sv", "language:am", "language:da", "language:mr", "language:no", "language:gu", "language:in", "language:ja", "language:el", "language:lv", "language:it", "language:ca", "language:is", "language:cs", "language:te", "language:tl", "language:ro", "language:ckb", "language:pt", "language:ps", "language:zh", "language:sr", "language:pa", "language:si", "language:ml", "language:ht", "language:kn", "language:ar", "language:hu", "language:nl", "language:bg", "language:bn", "language:ne", "language:hi", "language:de", "language:ko", "language:fi", "language:fr", "language:es", "language:et", "language:en", "language:fa", "language:lt", "language:or", "language:cy", "language:eu", "language:iw", "language:ta", "language:th", "language:tr", "license:cc-by-4.0", "Twitter", "Multilingual", "Classification", "Benchmark", "arxiv:2209.07562", "region:us" ]
2022-11-06T02:52:17+00:00
{"language": ["sl", "ur", "sd", "pl", "vi", "sv", "am", "da", "mr", false, "gu", "in", "ja", "el", "lv", "it", "ca", "is", "cs", "te", "tl", "ro", "ckb", "pt", "ps", "zh", "sr", "pa", "si", "ml", "ht", "kn", "ar", "hu", "nl", "bg", "bn", "ne", "hi", "de", "ko", "fi", "fr", "es", "et", "en", "fa", "lt", "or", "cy", "eu", "iw", "ta", "th", "tr"], "license": "cc-by-4.0", "tags": ["Twitter", "Multilingual", "Classification", "Benchmark"]}
2022-11-21T21:22:07+00:00
1318399b8a06c168778330d254e31d6a5bc5796d
hkgkjg111/color1
[ "region:us" ]
2022-11-06T06:46:20+00:00
{}
2022-11-06T06:47:28+00:00
5b97d8f7a59d0414050b58e9cdb2c48fc78ec1a1
# Dataset Card for Machine Paraphrase Dataset (MPC) ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Repository:** https://github.com/jpwahle/iconf22-paraphrase - **Paper:** https://link.springer.com/chapter/10.1007/978-3-030-96957-8_34 - **Total size:** 533 MB - **Train size:** 340 MB - **Test size:** 193 MB ### Dataset Summary The Machine Paraphrase Corpus (MPC) consists of ~200k examples of original and machine-paraphrased text. It uses two online paraphrasing tools (SpinnerChief, SpinBot) on three source texts (Wikipedia, arXiv, student theses). The examples are **not** aligned, i.e., we sample different paragraphs for originals and paraphrased versions. ### How to use it You can load the dataset using the `load_dataset` function: ```python from datasets import load_dataset ds = load_dataset("jpwahle/machine-paraphrase-dataset") print(ds["train"][0]) #OUTPUT: { 'text': 'The commemoration was revealed on Whit Monday 16 May 1921 by the Prince of Wales later King Edward VIII with Lutyens in participation At the divulging function Lord Fortescue gave a discourse in which he evaluated that 11600 people from Devon had been slaughtered while serving in the war He later expressed that somewhere in the range of 63700 8000 regulars 36700 volunteers and 19000 recruits had served in the military The names of the fallen were recorded on a move of respect of which three duplicates were made one for Exeter Cathedral one to be held by the district chamber and one which the Prince of Wales put in an empty in the base of the war dedication The rulers visit created impressive energy in the zone A large number of individuals lined the road to welcome his motorcade and shops on the High Street hung out pennants with inviting messages After the uncovering Edward went through ten days visiting the neighborhood ', 'label': 1, 'dataset': 'wikipedia', 'method': 'spinbot' } ``` ### Supported Tasks and Leaderboards Paraphrase Identification ### Languages English ## Dataset Structure ### Data Instances ```json { 'text': 'The commemoration was revealed on Whit Monday 16 May 1921 by the Prince of Wales later King Edward VIII with Lutyens in participation At the divulging function Lord Fortescue gave a discourse in which he evaluated that 11600 people from Devon had been slaughtered while serving in the war He later expressed that somewhere in the range of 63700 8000 regulars 36700 volunteers and 19000 recruits had served in the military The names of the fallen were recorded on a move of
respect of which three duplicates were made one for Exeter Cathedral one to be held by the district chamber and one which the Prince of Wales put in an empty in the base of the war dedication The rulers visit created impressive energy in the zone A large number of individuals lined the road to welcome his motorcade and shops on the High Street hung out pennants with inviting messages After the uncovering Edward went through ten days visiting the neighborhood ', 'label': 1, 'dataset': 'wikipedia', 'method': 'spinbot' } ``` ### Data Fields | Feature | Description | | --- | --- | | `text` | The paragraph text (original or paraphrased). | | `label` | Whether it is a paraphrase (1) or the original (0). | | `dataset` | The source dataset (Wikipedia, arXiv, or theses). | | `method` | The method used (SpinBot, SpinnerChief, original). | ### Data Splits - train (Wikipedia x Spinbot) - test ([Wikipedia, arXiv, theses] x [SpinBot, SpinnerChief]) ## Dataset Creation ### Curation Rationale Providing a resource for testing against machine-paraphrased plagiarism. ### Source Data #### Initial Data Collection and Normalization - Paragraphs from `featured articles` from the English Wikipedia dump - Paragraphs from full-text pdfs of arXMLiv - Paragraphs from full-text pdfs of Czech student theses (bachelor, master, PhD). #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [Jan Philip Wahle](https://jpwahle.com/) ### Licensing Information The Machine Paraphrase Dataset is released under CC BY-NC 4.0. By using this corpus, you agree to its usage terms. ### Citation Information ```bib @inproceedings{10.1007/978-3-030-96957-8_34, title = {Identifying Machine-Paraphrased Plagiarism}, author = {Wahle, Jan Philip and Ruas, Terry and Folt{\'y}nek, Tom{\'a}{\v{s}} and Meuschke, Norman and Gipp, Bela}, year = 2022, booktitle = {Information for a Better World: Shaping the Global Future}, publisher = {Springer International Publishing}, address = {Cham}, pages = {393--413}, isbn = {978-3-030-96957-8}, editor = {Smits, Malte}, abstract = {Employing paraphrasing tools to conceal plagiarized text is a severe threat to academic integrity. To enable the detection of machine-paraphrased text, we evaluate the effectiveness of five pre-trained word embedding models combined with machine learning classifiers and state-of-the-art neural language models. We analyze preprints of research papers, graduation theses, and Wikipedia articles, which we paraphrased using different configurations of the tools SpinBot and SpinnerChief. The best performing technique, Longformer, achieved an average F1 score of 80.99{\%} (F1 = 99.68{\%} for SpinBot and F1 = 71.64{\%} for SpinnerChief cases), while human evaluators achieved F1 = 78.4{\%} for SpinBot and F1 = 65.6{\%} for SpinnerChief cases. We show that the automated classification alleviates shortcomings of widely-used text-matching systems, such as Turnitin and PlagScan.} } ``` ### Contributions Thanks to [@jpwahle](https://github.com/jpwahle) for adding this dataset.
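Since the test split mixes three source corpora and two paraphrasing tools, per-slice evaluation is useful; this is a small sketch, and the exact string values for `dataset` and `method` are assumptions based on the field descriptions above.

```python
# Sketch: slice the test split by source corpus and paraphrasing tool.
from datasets import load_dataset

ds = load_dataset("jpwahle/machine-paraphrase-dataset", split="test")

subset = ds.filter(lambda x: x["dataset"] == "theses" and x["method"] == "spinnerchief")
print(len(subset))
```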
jpwahle/machine-paraphrase-dataset
[ "task_categories:text-classification", "task_categories:text-generation", "annotations_creators:machine-generated", "language_creators:machine-generated", "multilinguality:monolingual", "size_categories:100K<n<1M", "source_datasets:original", "language:en", "license:cc-by-4.0", "spinbot", "spinnerchief", "plagiarism", "paraphrase", "academic integrity", "arxiv", "wikipedia", "theses", "region:us" ]
2022-11-06T08:21:07+00:00
{"annotations_creators": ["machine-generated"], "language_creators": ["machine-generated"], "language": ["en"], "license": ["cc-by-4.0"], "multilinguality": ["monolingual"], "size_categories": ["100K<n<1M"], "source_datasets": ["original"], "task_categories": ["text-classification", "text-generation"], "task_ids": [], "paperswithcode_id": "identifying-machine-paraphrased-plagiarism", "pretty_name": "Machine Paraphrase Dataset (SpinnerChief/SpinBot)", "tags": ["spinbot", "spinnerchief", "plagiarism", "paraphrase", "academic integrity", "arxiv", "wikipedia", "theses"], "dataset_info": [{"split": "train", "download_size": 393224, "dataset_size": 393224}, {"split": "test", "download_size": 655376, "dataset_size": 655376}]}
2022-11-18T16:54:17+00:00
d3df6ced7063b572ef46aafd62bcbe953d196491
# Dataset Card for Autoencoder Paraphrase Dataset (APC) ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Paper:** https://ieeexplore.ieee.org/document/9651895 - **Total size:** 2.23 GB - **Train size:** 1.52 GB - **Test size:** 861 MB ### Dataset Summary The Autoencoder Paraphrase Corpus (APC) consists of ~200k examples of original and machine-paraphrased text. It uses three neural language models (BERT, RoBERTa, Longformer) on three source texts (Wikipedia, arXiv, student theses). The examples are aligned, i.e., we sample the same paragraphs for originals and paraphrased versions. ### How to use it You can load the dataset using the `load_dataset` function: ```python from datasets import load_dataset ds = load_dataset("jpwahle/autoencoder-paraphrase-dataset") print(ds["train"][0]) #OUTPUT: { 'text': 'War memorial formally unveiled on Whit Monday 16 May 1921 by the Prince of Wales later King Edward VIII with Lutyens in attendance At the unveiling ceremony Captain Fortescue gave a speech during wherein he announced that 11 600 men and women from Devon had been inval while serving in imperialist war He later stated that some 63 700 8 000 regulars 36 700 volunteers 19 000 conscripts had served in the armed forces The heroism of the dead are recorded on a roll of honour of which three copies were made one for Exeter Cathedral one To be held by Tasman county council and another honoring the Prince of Wales placed in a hollow in bedrock base of the war memorial The princes visit generated considerable excitement in the area Thousands of spectators lined the street to greet his motorcade and shops on Market High Street hung out banners with welcoming messages After the unveiling Edward spent ten days touring the local area', 'label': 1, 'dataset': 'wikipedia', 'method': 'longformer' } ``` ### Supported Tasks and Leaderboards Paraphrase Identification ### Languages English ## Dataset Structure ### Data Instances ```json { 'text': 'War memorial formally unveiled on Whit Monday 16 May 1921 by the Prince of Wales later King Edward VIII with Lutyens in attendance At the unveiling ceremony Captain Fortescue gave a speech during wherein he announced that 11 600 men and women from Devon had been inval while serving in imperialist war He later stated that some 63 700 8 000 regulars 36 700 volunteers 19 000 conscripts had served in the armed forces The heroism of the dead are recorded on a roll of honour of which three copies were made one for Exeter Cathedral one To be held by Tasman county
council and another honoring the Prince of Wales placed in a hollow in bedrock base of the war memorial The princes visit generated considerable excitement in the area Thousands of spectators lined the street to greet his motorcade and shops on Market High Street hung out banners with welcoming messages After the unveiling Edward spent ten days touring the local area', 'label': 1, 'dataset': 'wikipedia', 'method': 'longformer' } ``` ### Data Fields | Feature | Description | | --- | --- | | `text` | The paragraph text (original or paraphrased). | | `label` | Whether it is a paraphrase (1) or the original (0). | | `dataset` | The source dataset (Wikipedia, arXiv, or theses). | | `method` | The method used (bert, roberta, longformer). | ### Data Splits - train (Wikipedia x [bert, roberta, longformer]) - test ([Wikipedia, arXiv, theses] x [bert, roberta, longformer]) ## Dataset Creation ### Curation Rationale Providing a resource for testing against autoencoder-paraphrased plagiarism. ### Source Data #### Initial Data Collection and Normalization - Paragraphs from `featured articles` from the English Wikipedia dump - Paragraphs from full-text pdfs of arXMLiv - Paragraphs from full-text pdfs of Czech student theses (bachelor, master, PhD). #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [Jan Philip Wahle](https://jpwahle.com/) ### Licensing Information The Autoencoder Paraphrase Dataset is released under CC BY-NC 4.0. By using this corpus, you agree to its usage terms. ### Citation Information ```bib @inproceedings{9651895, title = {Are Neural Language Models Good Plagiarists? A Benchmark for Neural Paraphrase Detection}, author = {Wahle, Jan Philip and Ruas, Terry and Meuschke, Norman and Gipp, Bela}, year = 2021, booktitle = {2021 ACM/IEEE Joint Conference on Digital Libraries (JCDL)}, volume = {}, number = {}, pages = {226--229}, doi = {10.1109/JCDL52503.2021.00065} } ``` ### Contributions Thanks to [@jpwahle](https://github.com/jpwahle) for adding this dataset.
jpwahle/autoencoder-paraphrase-dataset
[ "task_categories:text-classification", "task_categories:text-generation", "annotations_creators:machine-generated", "language_creators:machine-generated", "multilinguality:monolingual", "size_categories:100K<n<1M", "source_datasets:original", "language:en", "license:cc-by-4.0", "bert", "roberta", "longformer", "plagiarism", "paraphrase", "academic integrity", "arxiv", "wikipedia", "theses", "region:us" ]
2022-11-06T08:28:10+00:00
{"annotations_creators": ["machine-generated"], "language_creators": ["machine-generated"], "language": ["en"], "license": ["cc-by-4.0"], "multilinguality": ["monolingual"], "size_categories": ["100K<n<1M"], "source_datasets": ["original"], "task_categories": ["text-classification", "text-generation"], "task_ids": [], "paperswithcode_id": "are-neural-language-models-good-plagiarists-a", "pretty_name": "Autoencoder Paraphrase Dataset (BERT, RoBERTa, Longformer)", "tags": ["bert", "roberta", "longformer", "plagiarism", "paraphrase", "academic integrity", "arxiv", "wikipedia", "theses"], "dataset_info": [{"split": "train", "download_size": 2980464, "dataset_size": 2980464}, {"split": "test", "download_size": 1690032, "dataset_size": 1690032}]}
2022-11-18T17:26:00+00:00
2ba342d0d668e896b3a805691ab3bcba5f8cc9d3
# Dataset Card for [Dataset Name] ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Size:** 163MB - **Repository:** https://github.com/jpwahle/emnlp22-transforming - **Paper:** https://arxiv.org/abs/2210.03568 ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset.
jpwahle/autoregressive-paraphrase-dataset
[ "task_categories:text-classification", "task_categories:text-generation", "annotations_creators:machine-generated", "language_creators:machine-generated", "multilinguality:monolingual", "size_categories:100K<n<1M", "source_datasets:original", "language:en", "license:cc-by-4.0", "plagiarism", "paraphrase", "academic integrity", "arxiv", "wikipedia", "theses", "bert", "roberta", "t5", "gpt-3", "arxiv:2210.03568", "region:us" ]
2022-11-06T08:28:27+00:00
{"annotations_creators": ["machine-generated"], "language_creators": ["machine-generated"], "language": ["en"], "license": ["cc-by-4.0"], "multilinguality": ["monolingual"], "size_categories": ["100K<n<1M"], "source_datasets": ["original"], "task_categories": ["text-classification", "text-generation"], "task_ids": [], "pretty_name": "Machine Paraphrase Dataset (T5, GPT-3)", "tags": ["plagiarism", "paraphrase", "academic integrity", "arxiv", "wikipedia", "theses", "bert", "roberta", "t5", "gpt-3"]}
2022-11-19T12:14:43+00:00
f8cbdcb404b37b5804966dd9851064e01db9f4e6
Adapting/MLO
[ "license:mit", "region:us" ]
2022-11-06T09:23:15+00:00
{"license": "mit"}
2022-11-26T15:33:02+00:00
8712f2e0b993eefe0b12f604d726048951b2fe46
# Dataset Card for DBLP Discovery Dataset (D3) ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Repository:** https://github.com/jpwahle/lrec22-d3-dataset - **Paper:** https://aclanthology.org/2022.lrec-1.283/ - **Total size:** 8.71 GB ### Dataset Summary DBLP is the largest open-access repository of scientific articles on computer science and provides metadata associated with publications, authors, and venues. We retrieved more than 6 million publications from DBLP and extracted pertinent metadata (e.g., abstracts, author affiliations, citations) from the publication texts to create the DBLP Discovery Dataset (D3). D3 can be used to identify trends in research activity, productivity, focus, bias, accessibility, and impact of computer science research. We present an initial analysis focused on the volume of computer science research (e.g., number of papers, authors, research activity), trends in topics of interest, and citation patterns. Our findings show that computer science is a growing research field (15% annually), with an active and collaborative researcher community. While papers in recent years present more bibliographical entries in comparison to previous decades, the average number of citations has been declining. Investigating papers’ abstracts reveals that recent topic trends are clearly reflected in D3. Finally, we list further applications of D3 and pose supplemental research questions. The D3 dataset, our findings, and source code are publicly available for research purposes. ### Supported Tasks and Leaderboards [More Information Needed] ### Languages English ## Dataset Structure ### Data Instances Total size: 8.71 GB Papers size: 8.13 GB Authors size: 0.58 GB ### Data Fields #### Papers | Feature | Description | | --- | --- | | `corpusid` | The unique identifier of the paper. | | `externalids` | The same paper in other repositories (e.g., DOI, ACL). | | `title` | The title of the paper. | | `authors` | The authors of the paper with their `authorid` and `name`. | | `venue` | The venue of the paper. | | `year` | The year of the paper publication. | | `publicationdate` | A more precise publication date of the paper. | | `abstract` | The abstract of the paper. | | `outgoingcitations` | The number of references of the paper. | | `ingoingcitations` | The number of citations of the paper. | | `isopenaccess` | Whether the paper is open access. | | `influentialcitationcount` | The number of influential citations of the paper according to SemanticScholar. 
| | `s2fieldsofstudy` | The fields of study of the paper according to SemanticScholar. | | `publicationtypes` | The publication types of the paper. | | `journal` | The journal of the paper. | | `updated` | The last time the paper was updated. | | `url` | A url to the paper in SemanticScholar. | #### Authors | Feature | Description | | --- | --- | | `authorid` | The unique identifier of the author. | | `externalids` | The same author in other repositories (e.g., ACL, PubMed). This can include `ORCID` | | `name` | The name of the author. | | `affiliations` | The affiliations of the author. | | `homepage` | The homepage of the author. | | `papercount` | The number of papers the author has written. | | `citationcount` | The number of citations the author has received. | | `hindex` | The h-index of the author. | | `updated` | The last time the author was updated. | | `email` | The email of the author. | | `s2url` | A url to the author in SemanticScholar. | ### Data Splits - `papers` - `authors` ## Dataset Creation ### Curation Rationale Providing a resource to analyze the state of computer science research statistically and semantically. ### Source Data #### Initial Data Collection and Normalization DBLP and from v2.0 SemanticScholar ## Additional Information ### Dataset Curators [Jan Philip Wahle](https://jpwahle.com/) ### Licensing Information The DBLP Discovery Dataset is released under the CC BY-NC 4.0. By using this corpus, you are agreeing to its usage terms. ### Citation Information If you use the dataset in any way, please cite: ```bib @inproceedings{Wahle2022c, title = {D3: A Massive Dataset of Scholarly Metadata for Analyzing the State of Computer Science Research}, author = {Wahle, Jan Philip and Ruas, Terry and Mohammad, Saif M. and Gipp, Bela}, year = {2022}, month = {July}, booktitle = {Proceedings of The 13th Language Resources and Evaluation Conference}, publisher = {European Language Resources Association}, address = {Marseille, France}, doi = {}, } ``` Also make sure to cite the following papers if you use SemanticScholar data: ```bib @inproceedings{ammar-etal-2018-construction, title = "Construction of the Literature Graph in Semantic Scholar", author = "Ammar, Waleed and Groeneveld, Dirk and Bhagavatula, Chandra and Beltagy, Iz", booktitle = "Proceedings of the 2018 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 3 (Industry Papers)", month = jun, year = "2018", address = "New Orleans - Louisiana", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/N18-3011", doi = "10.18653/v1/N18-3011", pages = "84--91", } ``` ```bib @inproceedings{lo-wang-2020-s2orc, title = "{S}2{ORC}: The Semantic Scholar Open Research Corpus", author = "Lo, Kyle and Wang, Lucy Lu and Neumann, Mark and Kinney, Rodney and Weld, Daniel", booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", month = jul, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.acl-main.447", doi = "10.18653/v1/2020.acl-main.447", pages = "4969--4983" } ```### Contributions Thanks to [@jpwahle](https://github.com/jpwahle) for adding this dataset.
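As a quick orientation (not part of the original card), the sketch below shows one way the `papers` configuration could be streamed with the `datasets` library to count publications per year. The configuration name follows the card above; the `train` split name and the assumption that `year` is populated for most records are guesses.

```python
from collections import Counter
from itertools import islice

from datasets import load_dataset

# Stream the `papers` configuration to avoid downloading the full ~8 GB dump.
# The config name follows the card; the "train" split name is an assumption.
papers = load_dataset(
    "jpwahle/dblp-discovery-dataset", "papers", split="train", streaming=True
)

# Count publications per year over the first 10k records as a cheap sanity check.
year_counts = Counter(
    record["year"] for record in islice(papers, 10_000) if record.get("year")
)

for year, count in sorted(year_counts.items()):
    print(year, count)
```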
jpwahle/dblp-discovery-dataset
[ "task_categories:other", "annotations_creators:found", "language_creators:found", "multilinguality:monolingual", "size_categories:1M<n<10M", "source_datasets:extended|s2orc", "language:en", "license:cc-by-4.0", "dblp", "s2", "scientometrics", "computer science", "papers", "arxiv", "region:us" ]
2022-11-06T09:42:13+00:00
{"annotations_creators": ["found"], "language_creators": ["found"], "language": ["en"], "license": ["cc-by-4.0"], "multilinguality": ["monolingual"], "size_categories": ["1M<n<10M"], "source_datasets": ["extended|s2orc"], "task_categories": ["other"], "task_ids": [], "paperswithcode_id": "d3", "pretty_name": "DBLP Discovery Dataset (D3)", "tags": ["dblp", "s2", "scientometrics", "computer science", "papers", "arxiv"], "dataset_info": [{"config_name": "papers", "download_size": 15876152, "dataset_size": 15876152}, {"config_name": "authors", "download_size": 1177888, "dataset_size": 1177888}]}
2022-11-28T13:18:13+00:00
809cbb33cc56feb36861453482737011984d2e72
# Dataset Card for "amazon-reviews-input-output" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
AlekseyKorshuk/amazon-reviews-input-output
[ "region:us" ]
2022-11-06T10:49:53+00:00
{"dataset_info": {"features": [{"name": "input_text", "dtype": "string"}, {"name": "output_text", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 3105, "num_examples": 10}, {"name": "train", "num_bytes": 223383, "num_examples": 1000}, {"name": "validation", "num_bytes": 24145, "num_examples": 100}], "download_size": 160709, "dataset_size": 250633}}
2022-11-06T10:54:44+00:00
7257fc7041564826ef9e11c7eb25e520c553a23a
# Dataset Card for "minguostyle" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jojofan/minguostyle
[ "region:us" ]
2022-11-06T11:00:25+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 444193006.0, "num_examples": 944}], "download_size": 444181518, "dataset_size": 444193006.0}}
2022-11-20T10:23:03+00:00
3fe6546a4680db3b29a73ab9b6d8eeb955c7f3c3
# Dataset Card for "simpsons-blip-captions"
Norod78/simpsons-blip-captions
[ "task_categories:text-to-image", "annotations_creators:machine-generated", "language_creators:other", "multilinguality:monolingual", "size_categories:n<1K", "language:en", "license:cc-by-nc-sa-4.0", "region:us" ]
2022-11-06T11:11:36+00:00
{"annotations_creators": ["machine-generated"], "language_creators": ["other"], "language": ["en"], "license": "cc-by-nc-sa-4.0", "multilinguality": ["monolingual"], "size_categories": ["n<1K"], "task_categories": ["text-to-image"], "pretty_name": "Simpsons BLIP captions", "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 51605730.0, "num_examples": 755}], "download_size": 50553165, "dataset_size": 51605730.0}, "tags": []}
2022-11-09T16:27:19+00:00
1af6f6156e7344bdc0066a872bbd65f971eb2a93
Renanriozz/Renanzzz
[ "license:afl-3.0", "region:us" ]
2022-11-06T12:51:25+00:00
{"license": "afl-3.0"}
2022-11-06T12:51:25+00:00
df2ff8dcc6a6444f74d735d16d12b50d9c25fbab
# Dataset Card for "processed_bert_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sanagnos/processed_bert_dataset
[ "region:us" ]
2022-11-06T12:54:51+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "token_type_ids", "sequence": "int8"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "special_tokens_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 24027415200.0, "num_examples": 6674282}], "download_size": 5731603526, "dataset_size": 24027415200.0}}
2022-11-06T22:27:01+00:00
9655fd7b4d3c9b841446e3687c720f766372ca4c
# Dataset Card for "vqgan1024_reconstruction" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
maloyan/vqgan1024_reconstruction
[ "region:us" ]
2022-11-06T13:36:33+00:00
{"dataset_info": {"features": [{"name": "image_512", "dtype": "image"}, {"name": "image_256", "dtype": "image"}, {"name": "reconstruction_256", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 3446042724.0, "num_examples": 100000}], "download_size": 4331449801, "dataset_size": 3446042724.0}}
2022-11-06T13:40:50+00:00
d6db2b4d043c2ecbb182e356f38e39afa7a117df
awinml/costco_long_practice
[ "license:mit", "region:us" ]
2022-11-06T14:13:23+00:00
{"license": "mit"}
2022-11-18T04:34:12+00:00
3ba47917946a42d60ce0495fd5b4201f63472f6b
enriqueaf/molinillo_pimienta
[ "license:gpl-3.0", "region:us" ]
2022-11-06T14:50:21+00:00
{"license": "gpl-3.0"}
2022-11-06T14:50:46+00:00
b93efca2ce3c849127d7fd63cd188dfb357bd18b
Casulu/harold
[ "region:us" ]
2022-11-06T15:09:50+00:00
{}
2022-11-06T15:09:59+00:00
112a1953643ce80c81c9bdd37f751909cf10f4b6
# AutoTrain Dataset for project: csi5386 ## Dataset Description This dataset has been automatically processed by AutoTrain for project csi5386. ### Languages The BCP-47 code for the dataset's language is en. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "context": "Exhibit 10.1\n\nFORM OF SUB-RESELLER AGREEMENT\n\nSignature Page\n\nReseller Full Legal Name Salesforce.org, a nonprofit public benefit corporation having its principal place of business at 50 Fremont Street, Suite 300, San Francisco, California 94105\n\nThis Form of Sub-Reseller Agreement (this \"Sub-Reseller Agreement\") is made and entered in by and between salesforce.com, inc., a Delaware corporation having its principal place of business at The Landmark @ One Market, Suite 300, San Francisco, California 94105 (\"SFDC\" or \"Salesforce\") and the Reseller named above and amends that certain Reseller Agreement between Salesforce and Reseller dated as of August 1, 2015, as previously amended (the \"Agreement\"). This Sub-Reseller Agreement is effective as of the later of the dates beneath the Parties' signatures below (\"Sub-Reseller Effective Date\"), provided, however, that the dates of the Parties' signatures are not separated by a period of time greater than ten (10) business days. If such period is greater than ten (10) business days then this Sub-Reseller Agreement shall be deemed null and void and to be of no effect. Capitalized terms not defined herein shall have the meanings given to them in the Agreement.\n\nThe Parties, by their respective authorized signatories, have duly executed this Sub-Reseller Agreement as of the Sub-Reseller Effective Date.\n\nSalesforce.com, Inc. Reseller\n\nBy: By: Name: Name: Title: Title: Date: Date:\n\nSource: SALESFORCE.COM, INC., 10-Q, 11/22/2017\n\n\n\n\n\nExhibit 10.1\n\nSub-Reseller Agreement Terms & Conditions\n\n1. Resale Rights. SFDC hereby appoints SUB-RESELLER (\"Sub-Reseller\") as a sub-reseller to whom Reseller may resell Services in accordance with Section 2(ii) of the Agreement, provided that Sub-Reseller may only resell such Services to Customer. Reseller must ensure that Sub-Reseller complies with the terms of the Agreement applicable to Reseller as if Sub- Reseller were an original party to the Agreement and any breach by Sub-Reseller of the Agreement will be deemed a breach by Reseller. Sub-Reseller is not be a third-party beneficiary of the Agreement.\n\n2. Effect of Sub-Reseller Agreement. Subject to the above modifications, the Agreement remains in full force and effect.\n\n3. Entire Agreement. The terms and conditions herein contained constitute the entire agreement between the Parties with respect to the subject matter of this Sub-Reseller Agreement and supersede any previous and contemporaneous agreements and understandings, whether oral or written, between the Parties hereto with respect to the subject matter hereof.\n\n4. Counterparts. This Sub-Reseller Agreement may be executed in one or more counterparts, including facsimiles or scanned copies sent via email or otherwise, each of which will be deemed to be a duplicate original, but all of which, taken together, will be deemed to constitute a single instrument.\n\nSource: SALESFORCE.COM, INC., 10-Q, 11/22/2017", "question": "Highlight the parts (if any) of this contract related to \"Non-Disparagement\" that should be reviewed by a lawyer. 
Details: Is there a requirement on a party not to disparage the counterparty?", "answers.text": [ "" ], "answers.answer_start": [ -1 ], "feat_id": [ "SalesforcecomInc_20171122_10-Q_EX-10.1_10961535_EX-10.1_Reseller Agreement__Non-Disparagement_0" ], "feat_title": [ "SalesforcecomInc_20171122_10-Q_EX-10.1_10961535_EX-10.1_Reseller Agreement" ] }, { "context": "EXHIBIT 10.2\n\n DISTRIBUTOR AGREEMENT\n\nEXHIBIT 10.2\n\n EXCLUSIVE DISTRIBUTOR AGREEMENT\n\n THIS EXCLUSIVE DISTRIBUTOR AGREEMENT (the \"Agreement\") shall be effective as of _Dec. 8, 2005 (hereinafter \"Effective Date\"), by and between LifeUSA/ Envision Health, Inc., a corporation (hereinafter collectively \"ENVISION\"), and Sierra Mountain Minerals, Inc., a Canadian company (hereinafter \"SIERRA\"), is made with reference to the following facts:\n\n Recitals\n\nA. SIERRA is the manufacture and producer of a joint health product called \"SierraSil\" (hereinafter \"the Product\") for human use.\n\nB. ENVISION is the manufacturer of certain nutritional supplements and is desirous of becoming an exclusive distributor for the Product in any blend with Krill Oil (hereinafter \"the Finished Product\") in all distribution channels in the Territory on the terms and conditions set forth herein.\n\nC. SIERRA is desirous of having ENVISION act as its exclusive distributor for the Product in any blend with Krill Oil in all distribution channels in the Territory on the terms and conditions set forth herein.\n\nNOW, THEREFORE, it is hereby agreed as follows:\n\n1. Incorporation of Recitals. The Recitals set forth in Paragraphs A through C, above, are incorporated herein as though set forth in full.\n\n2. Appointment. SIERRA hereby appoints ENVISION as its exclusive distributor for the Product in any blend with Krill Oil within the Territory subject to ENVISION fulfilling the terms and conditions of the best efforts marketing requirements set forth herein in Sections 4, 5, and 9. SIERRA shall cease making sales to any customer or distributor who, during the term of this Agreement, violates ENVISION's exclusivity.\n\n3. Territory. The Territory shall be the entire world.\n\n4. Prices and Terms. The price for the Product as set forth in Section 9 herein, sold by SIERRA to ENVISION, shall be subject to change due to changes in manufacturing costs and so as to maximize profits; any changes in price for the Product shall not be applicable to previously accepted orders and shall be made with at least ninety (90) days advance notice in writing and in good faith by conference of the parties. ENVISION shall not resell the Product alone. Terms of payment will be 1/3 upon placement of order and 2/3 balance net thirty (30) days or as mutually agreed upon in writing between the parties. Delivery will be F.O.B. ENVISION shall be responsible for all costs of shipping from SIERRA to ENVISION.\n\n5. Product Support. ENVISION will use its best efforts to market and sell the Finished Product throughout the Territory. The parties also agree that:\n\n o If SIERRA customers are interested in purchasing the Product in any blend with Krill Oil, SIERRA will refer them to ENVISION.\n\n o ENVISION will be responsible for all costs associated with developing and manufacturing the Finished Product.\n\n6. Sales Disclosures. ENVISION will provide SIERRA with demand projections for the Product and SIERRA will produce enough Product to meet such demand projections. 
ENVISION will inform SIERRA of committed sales and SIERRA will increase or scale up its production of the Product accordingly. SIERRA will not unreasonably withhold the Product, but shall not be liable for unfulfilled or partially fulfilled orders given just cause for such action.\n\n7. Term. The term of this Agreement shall be two (2) years from the Effective Date with automatic annual renewals thereafter provided either party does not provide sixty (60) days notice of termination prior to the renewal date or the Agreement is not otherwise terminated as set forth in Section 8.\n\n8. Termination. (a) Upon the occurrence of a material breach or default as to any obligation, term or provision contained herein by either party and the failure of the breaching party to promptly pursue (within thirty (30) days after receiving written notice thereof from the non-breaching party) a reasonable remedy designed to cure (in the reasonable judgment of the non-breaching party) such material breach or default, this Agreement may be terminated by the non-breaching party by giving written notice of termination to the breaching party, such termination\n\n\n\n\n\n being immediately effective upon the giving of such notice of termination.\n\n (b) Upon the occurrence of bankruptcy of the other party, breach of confidentiality, government legislative interference, or force majeure extending beyond sixty (60) days, either party may immediately terminate the Agreement.\n\n9. Purchase Requirements. During the term of this Agreement, ENVISION will exclusively purchase the Product from SIERRA. The parties mutually agree to the Purchase Price of:\n\n Product Purchase Price ----------------------------------------------- A. SierraSil Per Sierra Sil's wholesale price list.\n\n10. Intellectual Property. SIERRA is responsible for all Patent costs for the Product. SIERRA warrants it owns pending patents for the Product in the U.S. and internationally. SIERRA hereby grants ENVISION an exclusive, royalty-free sub-license of the Product's future patents, and patent applications to distribute, sell and market the Finished Product. SIERRA hereby agrees to indemnify, defend and hold ENVISION harmless from any claims that the Product infringes upon any other patent.\n\n11. Trademarks SIERRA is the owner of the trademark&sbsp; \"SierraSil\". This Agreement grants ENVISION a non-exclusive and non-royalty bearing license to use the mark \"SierraSil\". SIERRA shall at all times be the owner of the trademark and ENVISION shall acquire no rights thereto. Upon termination, ENVISION shall have eighteen (18) months to exhaust any inventories, packaging and advertising materials bearing the \"SierraSil\" trademark and SIERRA shall have first option to buy back any inventory at ENVISION's net purchase price.\n\n12. Independent Contractor Status. The parties acknowledge that ENVISION is an independent contractor and shall not be deemed to be an employee, agent, or joint venturer of SIERRA for any purpose, including federal tax purposes.\n\n13. Warranty. SIERRA warrants that the Product shall be free from defects in material and workmanship for the reasonable shelf life of the Product. In the event of any breach of this warranty or in the event any user of Product makes a claim that the Product was the cause of personal injury or property damage (product liability claim), SIERRA shall indemnify, defend and hold ENVISION harmless from any liability occasioned by a breach of warranty or a product liability claim. 
SIERRA warrants that it carries general liability insurance of not less than $2 million per occurrence and product liability insurance of not less than $5 million per occurrence and that, upon the execution of this Agreement, it will name ENVISION as an additional insured on such policies. SIERRA further warrants that the Product will not be adulterated or misbranded within the meaning of any federal, state, or local law or regulation or other applicable law. SIERRA agrees to promptly notify ENVISION of any problem, anomaly, defect or condition which would reasonably cause ENVISION's concern relative to stability, reliability, form, fit, function or quality of the Product.\n\n ENVISION warrants that the Finished Product will not be adulterated or misbranded within the meaning of any federal, state, or local law or regulation or other applicable law. In the event of any breach of this warranty or in the event any user of the Finished Product makes a claim that the Finished Product was the cause of personal injury or property damage (product liability claim), ENVISION shall indemnify, defend, and hold SIERRA harmless from any liability occasioned by a breach of warranty or a product liability claim. ENVISION warrants that it carries general liability insurance of $1 million per occurrence and product liability insurance of not less than $2 million per occurrence and that, upon execution of this Agreement, it will name SIERRA as an additional insured on such policies.\n\n14. Confidential Information. The parties acknowledge that, during the term of this Agreement, each may receive certain Proprietary Information of the other. Proprietary Information includes, without limitation, formula, scientific studies, processes, plans, formulations, technical information, new product information, methods of product delivery, test procedures, product samples, specifications, scientific, clinical, commercial and other information or data, customer lists, customer contacts, and other distributors within the Territory which are considered confidential in nature whether communicated in writing or orally. The parties agree that each will treat such information as confidential. Neither party shall have the right to disclose the Proprietary Information to any third party without the express written consent of the disclosing party. Neither party may use the proprietary information except in furtherance of the goals of this Agreement and is further prohibited from utilizing the Proprietary Information directly nor indirectly to engage in any business activity which is competitive with the other.\n\n15. Force Majeure. In no event shall any party be responsible for its failure to fulfill any of its obligations under this Agreement when such failure is due to fires, floods, riots, strikes, freight embargoes, acts of God or insurrection. In the event of a force majeure, the party affected thereby shall give immediate written notice to the other. If the event of force majeure continues for longer than\n\n\n\n\n\n sixty (60) days, the party not so affected shall have the right to terminate this Agreement.\n\n16. Non-Waiver of Default. The failure of either party at any time to require the performance by a party of any provision of this Agreement shall in no way affect the right to require performance at any time after such failure. 
The waiver of either party of a breach of any provision of this Agreement shall not be taken to be a waiver of any succeeding breach of the provision or as a waiver of the provision itself.\n\n17. Attorney's Fees. In the event either party is required to institute litigation to enforce any provision of this Agreement, the prevailing party in such litigation shall be entitled to recover all costs including without limitation, reasonable attorney's fees and expenses incurred in connection with such enforcement and collection.\n\n18. Venue. This Agreement is deemed to have been entered into in the State of Colorado, and its interpretation, construction, and the remedies for its enforcement or breach are to be applied pursuant to and in accordance with the laws of the State of Colorado.\n\n19. Notices. Any and all notices or other communication required or permitted to be given pursuant to this Agreement shall be in writing and shall be construed as properly given if mailed first class, postage prepaid to the address specified herein. Either party may designate, in writing, a change of address or other place to which notices may be sent.\n\n If to SIERRA: If to LIFEUSA/ENVISION: Mr. Michael Bentley Mr. Michael Schuett Sierra Mountain Minerals Inc. Envision Health, Inc. 1501 West Broadway, Suite 500 2475 Broadway, Suite 202 Vancouver BC V6J4Z6 Boulder, CO 80304 Canada\n\n20. Amendment. This Agreement shall not be modified or amended except by a written agreement executed by both parties.\n\n21. Entire Agreement. This Agreement constitutes the entire agreement between the parties with respect to the subject matter thereof and supersedes all prior agreements, whether written or oral.\n\n22. Assignment. The parties shall have the right to assign all, or part, of its rights under this Agreement to any wholly owned subsidiary or affiliate without the consent of the other Party. Any other assignment by the parties, requires the prior written consent of the other Party.\n\nACKNOWLEDGEMENTS\n\n Each party acknowledges that he or she has had an adequate opportunity to read and study this Agreement. The understanding of the aforesaid articles causes no difficulty whatsoever and each party has retained a copy of this agreement immediately after the signing of it by all parties.\n\n IN WITNESS WHEREOF, the parties have executed this Agreement effective as of the date and year first written above.\n\nSIERRA MOUNTAIN MINERALS LIFEUSA/ENVISION HEALTH\n\nBy: /s/ Michael Bentley By: /s/ Michael Schuett ----------------------- ------------------------- Michael Bentley Michael Schuett\n\n December 8, 2005 December 7, 2005 ----------------------- ------------------------------ Date Date", "question": "Highlight the parts (if any) of this contract related to \"Third Party Beneficiary\" that should be reviewed by a lawyer. 
Details: Is there a non-contracting party who is a beneficiary to some or all of the clauses in the contract and therefore can enforce its rights against a contracting party?", "answers.text": [ "" ], "answers.answer_start": [ -1 ], "feat_id": [ "LEGACYTECHNOLOGYHOLDINGS,INC_12_09_2005-EX-10.2-DISTRIBUTOR AGREEMENT__Third Party Beneficiary_0" ], "feat_title": [ "LEGACYTECHNOLOGYHOLDINGS,INC_12_09_2005-EX-10.2-DISTRIBUTOR AGREEMENT" ] } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "context": "Value(dtype='string', id=None)", "question": "Value(dtype='string', id=None)", "answers.text": "Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)", "answers.answer_start": "Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None)", "feat_id": "Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)", "feat_title": "Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow: | Split name | Num samples | | ------------ | ------------------- | | train | 16687 | | valid | 4182 |
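In the samples above, an unanswerable question is encoded as an empty `answers.text` entry with an `answers.answer_start` of -1. As a rough, hypothetical sketch (not part of the original card), the helper below shows how such records could be normalised into SQuAD-v2-style examples before fine-tuning an extractive QA model; the field names follow the sample shown above, and everything else is an assumption.

```python
def to_squad_v2(record: dict) -> dict:
    """Convert one record (fields as shown above) into a SQuAD-v2-style example,
    treating answer_start == -1 / empty answer text as an unanswerable question."""
    texts = [t for t in record["answers.text"] if t]
    starts = [s for s in record["answers.answer_start"] if s >= 0]
    return {
        "id": record["feat_id"][0],
        "title": record["feat_title"][0],
        "context": record["context"],
        "question": record["question"],
        "answers": {"text": texts, "answer_start": starts},
        "is_impossible": not texts,
    }


# Minimal usage example with a made-up record.
example = {
    "context": "This Agreement is made between Party A and Party B.",
    "question": "Highlight the parts related to 'Non-Disparagement'.",
    "answers.text": [""],
    "answers.answer_start": [-1],
    "feat_id": ["example-contract__Non-Disparagement_0"],
    "feat_title": ["example-contract"],
}
print(to_squad_v2(example))
```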
adrienheymans/autotrain-data-csi5386
[ "language:en", "region:us" ]
2022-11-06T15:30:45+00:00
{"language": ["en"]}
2022-11-07T00:44:12+00:00
968084f5cdec40cd12c2155cd044158d31819244
~15k logo images from LAION-5B have been rated for aesthetic preference (`preference_average`) and for how professional the designs look (`professionalism_average`).

---
license: apache-2.0
---
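For orientation only (not part of the original description), the snippet below assumes the ratings can be read directly through the `datasets` auto-loader, that a `train` split exists, and that the two columns are named exactly as above; all of these are assumptions, and the threshold is arbitrary.

```python
from datasets import load_dataset

# Assumes the repo exposes a tabular file the `datasets` auto-loader can read,
# with the two rating columns named as in the description above.
ratings = load_dataset("ChristophSchuhmann/aesthetic-logo-ratings", split="train")

# Keep only logos that score highly on both axes (threshold picked arbitrarily).
good_logos = ratings.filter(
    lambda row: row["preference_average"] >= 6
    and row["professionalism_average"] >= 6
)
print(len(good_logos), "logos rated >= 6 on both scales")
```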
ChristophSchuhmann/aesthetic-logo-ratings
[ "region:us" ]
2022-11-06T15:42:12+00:00
{}
2022-11-06T15:48:48+00:00
8daf4761566324fe9e52e121be2463fef5b1132c
siberspace/rwix
[ "region:us" ]
2022-11-06T16:11:14+00:00
{}
2022-11-06T16:12:03+00:00
b0a18171184271af6f198046eb0682c592d3fd53
Renanriozz/renanrzrz
[ "license:afl-3.0", "region:us" ]
2022-11-06T16:26:26+00:00
{"license": "afl-3.0"}
2022-11-06T16:27:53+00:00
799b08af2dc8fecbeb87cd55c8b38d6edc73a927
marianna13/improved_aesthetics_4.5plus-hr
[ "license:cc-by-4.0", "region:us" ]
2022-11-06T17:39:54+00:00
{"license": "cc-by-4.0"}
2022-11-06T17:39:54+00:00
06ee653c1ee0c6de272a4e611792829d16d8dfcb
# Dataset Card for "InstaFoodSet" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Dizex/InstaFoodSet
[ "region:us" ]
2022-11-06T19:39:47+00:00
{"dataset_info": {"features": [{"name": "tokens", "sequence": "string"}, {"name": "iob_tags", "sequence": "string"}, {"name": "iob_tags_ids", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 346804, "num_examples": 320}, {"name": "val", "num_bytes": 37219, "num_examples": 40}, {"name": "test", "num_bytes": 39352, "num_examples": 40}], "download_size": 84698, "dataset_size": 423375}}
2022-12-11T20:07:40+00:00
8093fd2c6a57407a7cac975c7e5525f1dd16a2e6
This dataset contains more than 13,000 AI-generated key findings from scientific studies and industry reports about veganism, animal rights activism, marketing and other topics that may be useful for vegan businesses and animal rights activists. We've made this dataset freely available so that it may benefit the wider movement as much as possible.

Each row in the CSV contains the title of the study, a link to the study and an AI-generated key finding from the study. Most key findings are a single sentence, while some are two or three, and all are written in natural, easy-to-understand language. These key findings were summarised from the abstracts of their respective studies using a combination of SciTLDR and our own specialised AI summarization model, TLDR Vegan Studies, which is freely accessible here: https://huggingface.co/VEG3/TLDR-Vegan-Studies.

There are some important limitations to consider before using this dataset. First, because each finding is generated by AI and not all have been manually approved by a human, there is no guarantee that every key finding is completely accurate. Second, summary generation may be biased towards the kinds of results present in the data used to train the TLDR Vegan Studies model. Finally, because studies were collected from multiple different sources, many studies appear with more than one key finding, which may bias the overall dataset towards the results of studies that are more widely distributed.

We recommend using this dataset to get a broad overview of what the greater body of research says on the topics covered, rather than relying on it alone to verify any particular factual claim. Depending on your use case, you might get the best results by deduplicating the dataset by title, URL and/or key finding before training any ML models on it.
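As a starting point for that deduplication step (not part of the original dataset description), here is a minimal pandas sketch; the file name and the column names `title`, `url` and `key_finding` are hypothetical stand-ins for whatever the CSV actually uses.

```python
import pandas as pd

# Hypothetical file and column names -- adjust to the actual CSV in this repository.
df = pd.read_csv("vegan_study_summaries.csv")

# Drop exact repeats of the same title/link/finding combination before training.
deduplicated = df.drop_duplicates(subset=["title", "url", "key_finding"])

print(f"{len(df) - len(deduplicated)} duplicate rows removed, {len(deduplicated)} kept")
```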
VEG3/VeganStudySummaries
[ "region:us" ]
2022-11-06T20:14:19+00:00
{}
2022-11-06T20:37:08+00:00
3a84e0922e7e92b3488088803eb370243c823307
# Dataset Card for "captioned-cartoons" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
juliaturc/captioned-cartoons
[ "region:us" ]
2022-11-06T23:02:31+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 22981331.0, "num_examples": 100}], "download_size": 22873699, "dataset_size": 22981331.0}}
2022-11-08T03:09:08+00:00
f61962036a0d12c1e92a66846e48a41eee6f6198
Eldog333/Me
[ "license:other", "region:us" ]
2022-11-07T00:49:24+00:00
{"license": "other"}
2022-11-07T00:50:03+00:00
82cfe4739bc635408dd8bc09cb0185cae3e92398
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: 123tarunanand/roberta-base-finetuned * Dataset: cuad * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@adrienheymans](https://huggingface.co/adrienheymans) for evaluating this model.
autoevaluate/autoeval-eval-cuad-default-2fec59-2004766522
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T00:50:51+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["cuad"], "eval_info": {"task": "extractive_question_answering", "model": "123tarunanand/roberta-base-finetuned", "metrics": ["recall"], "dataset_name": "cuad", "dataset_config": "default", "dataset_split": "test", "col_mapping": {"context": "context", "question": "question", "answers-text": "answers.text", "answers-answer_start": "answers.answer_start"}}}
2022-11-07T01:26:47+00:00
d740ca483f2af3a6b5cea2cba8c3662fb93021ad
# Dataset Card for "vlpr-dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ThankGod/vlpr-dataset
[ "region:us" ]
2022-11-07T03:45:43+00:00
{"dataset_info": {"features": [{"name": "image_id", "dtype": "int64"}, {"name": "image", "dtype": "image"}, {"name": "width", "dtype": "int64"}, {"name": "height", "dtype": "int64"}, {"name": "objects", "sequence": [{"name": "bbox_id", "dtype": "int64"}, {"name": "category", "dtype": {"class_label": {"names": {"0": "license_plate"}}}}, {"name": "bbox", "sequence": "float64", "length": 4}, {"name": "area", "dtype": "float64"}]}], "splits": [{"name": "train", "num_bytes": 9147825.0, "num_examples": 54}], "download_size": 9149130, "dataset_size": 9147825.0}}
2022-11-17T08:06:40+00:00
736f2c2384b098d610a948cc28ac8dbf5d988338
haris-waqar444/tweet_eval
[ "license:apache-2.0", "region:us" ]
2022-11-07T04:39:52+00:00
{"license": "apache-2.0"}
2022-11-07T04:39:52+00:00
0f70b23014485c74cb168659aeb4ae8b2bb9338a
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-350m_eval * Dataset: mathemakitten/winobias_antistereotype_test_cot_v3 * Config: mathemakitten--winobias_antistereotype_test_cot_v3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@mathemakitten](https://huggingface.co/mathemakitten) for evaluating this model.
autoevaluate/autoeval-eval-mathemakitten__winobias_antistereotype_test_cot_v3-math-468e93-2011366585
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T06:34:17+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["mathemakitten/winobias_antistereotype_test_cot_v3"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-350m_eval", "metrics": [], "dataset_name": "mathemakitten/winobias_antistereotype_test_cot_v3", "dataset_config": "mathemakitten--winobias_antistereotype_test_cot_v3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T06:35:59+00:00
0478bf1b7ee64012b862a64c61376ba8e4b81cef
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-13b_eval * Dataset: mathemakitten/winobias_antistereotype_test_cot_v3 * Config: mathemakitten--winobias_antistereotype_test_cot_v3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@mathemakitten](https://huggingface.co/mathemakitten) for evaluating this model.
autoevaluate/autoeval-eval-mathemakitten__winobias_antistereotype_test_cot_v3-math-468e93-2011366584
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T06:34:19+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["mathemakitten/winobias_antistereotype_test_cot_v3"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-13b_eval", "metrics": [], "dataset_name": "mathemakitten/winobias_antistereotype_test_cot_v3", "dataset_config": "mathemakitten--winobias_antistereotype_test_cot_v3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T07:04:04+00:00
d3dcec73a9f84f887dd40da86b11926bd9c39ea8
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-1.3b_eval * Dataset: mathemakitten/winobias_antistereotype_test_cot_v3 * Config: mathemakitten--winobias_antistereotype_test_cot_v3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@mathemakitten](https://huggingface.co/mathemakitten) for evaluating this model.
autoevaluate/autoeval-eval-mathemakitten__winobias_antistereotype_test_cot_v3-math-468e93-2011366588
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T06:34:19+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["mathemakitten/winobias_antistereotype_test_cot_v3"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-1.3b_eval", "metrics": [], "dataset_name": "mathemakitten/winobias_antistereotype_test_cot_v3", "dataset_config": "mathemakitten--winobias_antistereotype_test_cot_v3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T06:38:23+00:00
9528cc5a986594568e09e1e68d994190c0016c39
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-125m_eval * Dataset: mathemakitten/winobias_antistereotype_test_cot_v3 * Config: mathemakitten--winobias_antistereotype_test_cot_v3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@mathemakitten](https://huggingface.co/mathemakitten) for evaluating this model.
autoevaluate/autoeval-eval-mathemakitten__winobias_antistereotype_test_cot_v3-math-468e93-2011366581
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T06:34:19+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["mathemakitten/winobias_antistereotype_test_cot_v3"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-125m_eval", "metrics": [], "dataset_name": "mathemakitten/winobias_antistereotype_test_cot_v3", "dataset_config": "mathemakitten--winobias_antistereotype_test_cot_v3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T06:35:22+00:00
33d75241af98f80560bf0740ceccc7c6e8039c6e
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-30b_eval * Dataset: mathemakitten/winobias_antistereotype_test_cot_v3 * Config: mathemakitten--winobias_antistereotype_test_cot_v3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@mathemakitten](https://huggingface.co/mathemakitten) for evaluating this model.
autoevaluate/autoeval-eval-mathemakitten__winobias_antistereotype_test_cot_v3-math-468e93-2011366582
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T06:34:19+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["mathemakitten/winobias_antistereotype_test_cot_v3"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-30b_eval", "metrics": [], "dataset_name": "mathemakitten/winobias_antistereotype_test_cot_v3", "dataset_config": "mathemakitten--winobias_antistereotype_test_cot_v3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T07:45:13+00:00
5ce28162a971171ec4ebaa843086933f44514bdc
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-2.7b_eval * Dataset: mathemakitten/winobias_antistereotype_test_cot_v3 * Config: mathemakitten--winobias_antistereotype_test_cot_v3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@mathemakitten](https://huggingface.co/mathemakitten) for evaluating this model.
autoevaluate/autoeval-eval-mathemakitten__winobias_antistereotype_test_cot_v3-math-468e93-2011366587
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T06:34:20+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["mathemakitten/winobias_antistereotype_test_cot_v3"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-2.7b_eval", "metrics": [], "dataset_name": "mathemakitten/winobias_antistereotype_test_cot_v3", "dataset_config": "mathemakitten--winobias_antistereotype_test_cot_v3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T06:41:03+00:00
d39f59b98fe3d3de23022816e0b7628e997be832
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-6.7b_eval * Dataset: mathemakitten/winobias_antistereotype_test_cot_v3 * Config: mathemakitten--winobias_antistereotype_test_cot_v3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@mathemakitten](https://huggingface.co/mathemakitten) for evaluating this model.
autoevaluate/autoeval-eval-mathemakitten__winobias_antistereotype_test_cot_v3-math-468e93-2011366586
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T06:34:30+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["mathemakitten/winobias_antistereotype_test_cot_v3"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-6.7b_eval", "metrics": [], "dataset_name": "mathemakitten/winobias_antistereotype_test_cot_v3", "dataset_config": "mathemakitten--winobias_antistereotype_test_cot_v3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T06:50:47+00:00
c369a14220f7ffb54a945a9c116080200e449160
# Dataset Card for "en-bg" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
popaqy/en-bg
[ "region:us" ]
2022-11-07T07:41:12+00:00
{"dataset_info": {"features": [{"name": "bg", "dtype": "string"}, {"name": "en", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 175001915, "num_examples": 408290}], "download_size": 82909795, "dataset_size": 175001915}}
2022-11-07T07:43:16+00:00
b9984b8d2a95e4a1879e1b071e9433858d0bc24a
This dataset repository contains a subset of the UCF-101 dataset [1]. The subset archive was obtained using the code from [this guide](https://www.tensorflow.org/tutorials/load_data/video).

### References

[1] UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild, https://arxiv.org/abs/1212.0402.
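One possible way to fetch and unpack the archive locally is sketched below (not part of the original card); the archive file name is a guess and may differ from what the repository actually contains.

```python
import tarfile

from huggingface_hub import hf_hub_download

# The archive name below is an assumption -- check the repository's file listing.
archive_path = hf_hub_download(
    repo_id="sayakpaul/ucf101-subset",
    filename="UCF101_subset.tar.gz",
    repo_type="dataset",
)

with tarfile.open(archive_path) as tar:
    tar.extractall("ucf101_subset")
```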
sayakpaul/ucf101-subset
[ "license:apache-2.0", "arxiv:1212.0402", "region:us" ]
2022-11-07T07:48:27+00:00
{"license": "apache-2.0"}
2022-12-19T09:51:35+00:00
044daee39e83f3e8bbe83f1f3e90843b903b44b6
# Dataset Card for "europarl-bg-en" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
popaqy/europarl-bg-en
[ "region:us" ]
2022-11-07T07:57:26+00:00
{"dataset_info": {"features": [{"name": "bg", "dtype": "string"}, {"name": "en", "dtype": "string"}, {"name": "sentence_len", "dtype": "int64"}, {"name": "clear", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 178319272, "num_examples": 408290}], "download_size": 83310937, "dataset_size": 178319272}}
2022-11-07T08:04:07+00:00
76ed820a006877cdd73f111895d924fc402a64d5
ahaha111/mimi
[ "license:mit", "region:us" ]
2022-11-07T08:29:52+00:00
{"license": "mit"}
2022-11-07T08:31:18+00:00
d03b4dd788c7bcc417aa2bd9a43c2b58033a7bef
Based on the repository at https://github.com/bnitsan/PaperTweet/. Every entry in the dataset represents a Twitter thread written about a new paper on arXiv, likely by one of the original authors.

---
license: mit
---
nitsanb/paper_tweet
[ "region:us" ]
2022-11-07T09:02:56+00:00
{}
2022-11-07T09:39:31+00:00
5ffc27f405dd8765dc35fd678bce103e26403865
Rocks dataset with 7 classes: [Coal, Limestone, Marble, Sandstone, Quartzite, Basalt, Granite]
udayl/rocks
[ "license:mit", "region:us" ]
2022-11-07T09:06:56+00:00
{"license": "mit"}
2022-11-07T09:15:20+00:00
e91bfcac4e871fb739e6f0e277b2134f59ef13ec
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-2.7b * Dataset: futin/guess * Config: en * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-en-6f8c6a-2012266598
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T09:13:56+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-2.7b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "en", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T10:09:27+00:00
a55c0a73858f1bf4350e7d278f7f0eccbd1b3ef2
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-350m * Dataset: futin/guess * Config: vi * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-vi-4200fb-2012366608
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T09:13:56+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-350m", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "vi", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T09:55:00+00:00
f39559ec547386ac00c2d756fa3640ae5d7ce3ab
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-13b * Dataset: futin/guess * Config: en * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-en-6f8c6a-2012266596
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T09:13:56+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-13b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "en", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T13:20:45+00:00
d6a694e106fe23d4fb1b77906a54105c112c81f0
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-30b * Dataset: futin/guess * Config: vi * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-vi-4200fb-2012366603
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T09:13:57+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-30b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "vi", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-08T05:49:46+00:00
a26f906a89a8ad319882e66c4536430682e10ef9
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-30b * Dataset: futin/guess * Config: en * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-en-6f8c6a-2012266595
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T09:13:57+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-30b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "en", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T18:30:26+00:00
2dd1a45cc2633662ca009e6639e4da519cd7f273
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-1.3b * Dataset: futin/guess * Config: en * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-en-6f8c6a-2012266599
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T09:13:57+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-1.3b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "en", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T09:54:20+00:00
9dbf085c474f5e385751fe20b32ab88270e11553
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-13b * Dataset: futin/guess * Config: vi * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-vi-4200fb-2012366604
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T09:13:57+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-13b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "vi", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T17:07:15+00:00
61bb27e8f82641e484cbd89bfb3f2196646eeb58
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-125m * Dataset: futin/guess * Config: en * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-en-6f8c6a-2012266601
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T09:13:57+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-125m", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "en", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T09:22:01+00:00
395f330c131afd21a1868c328e85328fc06b472d
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-2.7b * Dataset: futin/guess * Config: vi * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-vi-4200fb-2012366606
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T09:13:58+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-2.7b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "vi", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T11:15:03+00:00
fb66d9176a74921eeaeffd525c3bb4d00fdb25e6
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-6.7b * Dataset: futin/guess * Config: vi * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-vi-4200fb-2012366605
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T09:13:58+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-6.7b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "vi", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T13:47:33+00:00
2a2d8bca11ab1639ce0caa5c5d1e97751433f6e2
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-6.7b * Dataset: futin/guess * Config: en * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-en-6f8c6a-2012266597
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T09:13:58+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-6.7b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "en", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T11:54:59+00:00
c330f79de6bb67f52fb257ed995c2a14a85ca149
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-66b * Dataset: futin/guess * Config: en * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-en-6f8c6a-2012266594
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T09:13:59+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-66b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "en", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-08T06:00:32+00:00
696b517c95bf6aab100f547d938abef511687f86
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-1.3b * Dataset: futin/guess * Config: vi * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-vi-4200fb-2012366607
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T09:13:59+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-1.3b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "vi", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T10:34:07+00:00
015f444197ecc37c81070714ac1c329aad00fa35
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-350m * Dataset: futin/guess * Config: en * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-en-6f8c6a-2012266600
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T09:14:03+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-350m", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "en", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T09:29:28+00:00
eb100ee78df88d98359981baece7dca4a77726df
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-125m * Dataset: futin/guess * Config: vi * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-vi-4200fb-2012366609
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T09:30:31+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-125m", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "vi", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T09:54:16+00:00
039e2bcdb13add2922938792f533d7c83c15845d
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-66b * Dataset: futin/guess * Config: en_3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-en_3-fcaae9-2012466610
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T09:38:35+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-66b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "en_3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T21:40:41+00:00
05d40f043da6ff55f9ae44a8f592773102bd3d71
Larvik/gelb
[ "license:unknown", "region:us" ]
2022-11-07T09:51:56+00:00
{"license": "unknown"}
2022-11-10T10:55:55+00:00
ed7ea0413ac649b9e948792bf8f2fcd3ae8de093
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-6.7b * Dataset: futin/guess * Config: en_3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-en_3-fcaae9-2012466613
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T10:01:58+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-6.7b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "en_3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T11:34:37+00:00
3d1373f9fc083be53a80c3a87ef813655f586585
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-13b * Dataset: futin/guess * Config: en_3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-en_3-fcaae9-2012466612
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T10:02:01+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-13b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "en_3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T12:21:25+00:00
a73c37eba883fdee7dea82ff92571db441a8f4da
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-30b * Dataset: futin/guess * Config: en_3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-en_3-fcaae9-2012466611
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T10:02:06+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-30b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "en_3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T15:23:23+00:00
e3834ddd9ef488efb339f1081015346d8fd868cf
## Dataset description Dataset containing SPOUT knotted (positive) and Rossmann unknotted (negative) proteins.
EvaKlimentova/knots_SPOUTxRossmann
[ "region:us" ]
2022-11-07T10:05:06+00:00
{}
2022-11-11T08:11:01+00:00
de90943f076255e5ccc9c5579999093ff86c57e3
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-2.7b * Dataset: futin/guess * Config: en_3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-en_3-fcaae9-2012466614
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T10:18:07+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-2.7b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "en_3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T10:49:25+00:00
8379b1e2b6c1050bafc3368aff20b3c470bf270f
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-1.3b * Dataset: futin/guess * Config: en_3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-en_3-fcaae9-2012466615
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T10:42:26+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-1.3b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "en_3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T11:05:11+00:00
a5d439de7d37530a429d374ca5d79ddfcf2c6746
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-350m * Dataset: futin/guess * Config: en_3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-en_3-fcaae9-2012466616
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T10:57:21+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-350m", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "en_3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T11:06:14+00:00
e7718716717bb01209e1282f8d34a53b5e5e334e
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-125m * Dataset: futin/guess * Config: en_3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-en_3-fcaae9-2012466617
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T11:13:15+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-125m", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "en_3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T11:17:46+00:00
4d1fc63d115a7b05160a7dd57eef36033ac92013
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-66b * Dataset: futin/guess * Config: vi_3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-vi_3-6b1064-2012566618
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T11:13:22+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-66b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "vi_3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-08T09:39:15+00:00
25277e0705b169505aff30510add82e3fb10e7aa
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-30b * Dataset: futin/guess * Config: vi_3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-vi_3-6b1064-2012566619
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T11:23:16+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-30b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "vi_3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T23:30:24+00:00
72c86a7e2c7b3452e22da4b75005c6270d6563c2
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-13b * Dataset: futin/guess * Config: vi_3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-vi_3-6b1064-2012566620
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T11:26:14+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-13b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "vi_3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T15:53:29+00:00
0b741a2cfe29293da11fd97f3de3928c6a9be645
# Dataset Card for "petitions-ds" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
eminecg/petitions-ds-v1
[ "region:us" ]
2022-11-07T11:42:39+00:00
{"dataset_info": {"features": [{"name": "petition", "dtype": "string"}, {"name": "petition_length", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 30642006.6, "num_examples": 2484}, {"name": "validation", "num_bytes": 3404667.4, "num_examples": 276}], "download_size": 15766696, "dataset_size": 34046674.0}}
2022-11-07T15:13:37+00:00
6fb735b46951e0d9c3a5fac8e26228b4f39c0c3a
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-6.7b * Dataset: futin/guess * Config: vi_3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-vi_3-6b1064-2012566621
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T11:42:59+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-6.7b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "vi_3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T14:15:29+00:00
4a93cb3d96048c9cbde09b82e99fe9edeeb29584
Pokernights/Musicindustry
[ "license:afl-3.0", "region:us" ]
2022-11-07T11:48:20+00:00
{"license": "afl-3.0"}
2022-11-07T11:48:20+00:00
55817960b45bea6f432f3dfb94c0ebdc39a1f078
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-2.7b * Dataset: futin/guess * Config: vi_3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-vi_3-6b1064-2012566622
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T12:03:14+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-2.7b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "vi_3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T13:10:12+00:00
c6dfdee3276b2433a65ab83b4e3e31fc0c7d39a0
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-1.3b * Dataset: futin/guess * Config: vi_3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-vi_3-6b1064-2012566623
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T12:29:52+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-1.3b", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "vi_3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T13:13:35+00:00
fec8f5bb1ea4e8f2cc868c685c1873deb78d2712
# Dataset Card for laion2B-multi-turkish-subset ## Dataset Description - **Homepage:** [laion-5b](https://laion.ai/blog/laion-5b/) - **Huggingface:** [laion/laion2B-multi](https://huggingface.co/datasets/laion/laion2B-multi) - **Point of Contact:** [mcemilg](mailto:[email protected]) ### Dataset Summary [LAION-5B](https://laion.ai/blog/laion-5b/) is a large-scale, openly accessible image-text dataset that contains text in multiple languages. This is a Turkish subset of [laion/laion2B-multi](https://huggingface.co/datasets/laion/laion2B-multi). It is compatible with [img2dataset](https://github.com/rom1504/img2dataset) for fetching the images at scale. ### Data Structure ```python DatasetDict({ train: Dataset({ features: ['SAMPLE_ID', 'URL', 'TEXT', 'HEIGHT', 'WIDTH', 'LICENSE', 'LANGUAGE', 'NSFW', 'similarity'], num_rows: 34638627 }) }) ``` ```python { 'SAMPLE_ID': Value(dtype='int64', id=None), 'URL': Value(dtype='string', id=None), 'TEXT': Value(dtype='string', id=None), 'HEIGHT': Value(dtype='int64', id=None), 'WIDTH': Value(dtype='int64', id=None), 'LICENSE': Value(dtype='string', id=None), 'LANGUAGE': Value(dtype='string', id=None), 'NSFW': Value(dtype='string', id=None), 'similarity': Value(dtype='float64', id=None) } ``` ### Notes The data was processed to drop non-Turkish and irrelevant texts before being published. Both the [FastText](https://fasttext.cc/docs/en/language-identification.html) and [langdetect](https://pypi.org/project/langdetect/) libraries were used to identify whether a text is Turkish. The cleaning process can be summarized as follows: - replace \"\"\" with an empty string - remove URLs in texts - drop rows where both FastText and langdetect are highly confident that the text contains no Turkish - drop empty text fields ### License CC-BY-4.0
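A short usage sketch follows (added for illustration, not part of the original card): it streams a handful of rows with the `datasets` library and re-runs a language check with `langdetect`, mirroring the cleaning notes above. The streaming flag, the five-row limit, and using `langdetect` alone (rather than the FastText + langdetect combination described in the Notes) are assumptions made for brevity; the `URL`/`TEXT` columns shown are the ones img2dataset would consume when fetching images.

```python
# Minimal sketch: stream a few rows and sanity-check the caption language.
# Assumptions (not from the card): streaming mode, five-row limit, and a
# single langdetect call standing in for the FastText + langdetect check.
from datasets import load_dataset
from langdetect import detect

ds = load_dataset("mcemilg/laion2B-multi-turkish-subset", split="train", streaming=True)

for i, row in enumerate(ds):
    if i >= 5:
        break
    # URL points at the image, TEXT holds the Turkish caption (see features above);
    # these two columns are what img2dataset consumes for large-scale fetching.
    print(row["URL"], row["TEXT"][:60], detect(row["TEXT"]))
```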
mcemilg/laion2B-multi-turkish-subset
[ "task_categories:text-to-image", "task_categories:image-to-text", "annotations_creators:crowdsourced", "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:10M<n<100M", "language:tr", "license:cc-by-4.0", "region:us" ]
2022-11-07T13:05:52+00:00
{"annotations_creators": ["crowdsourced"], "language_creators": ["crowdsourced"], "language": ["tr"], "license": ["cc-by-4.0"], "multilinguality": ["monolingual"], "size_categories": ["10M<n<100M"], "task_categories": ["text-to-image", "image-to-text"], "pretty_name": "laion2B-multi-turkish-subset"}
2022-11-08T05:47:01+00:00
fc5403fde3fa41ff2746b053fc6c21bb2e4082fb
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-350m * Dataset: futin/guess * Config: vi_3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-vi_3-6b1064-2012566624
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T13:19:17+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-350m", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "vi_3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T13:43:07+00:00
40799b6c0e33e5987c90fa0dab4f9d9b903d09d2
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-125m * Dataset: futin/guess * Config: vi_3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__guess-vi_3-6b1064-2012566625
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T13:21:53+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/guess"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-125m", "metrics": [], "dataset_name": "futin/guess", "dataset_config": "vi_3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T13:34:54+00:00
b93dc0317cb147a3c53de17c629714518effba9e
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-66b_eval * Dataset: mathemakitten/winobias_antistereotype_test_cot_v3 * Config: mathemakitten--winobias_antistereotype_test_cot_v3 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@mathemakitten](https://huggingface.co/mathemakitten) for evaluating this model.
autoevaluate/autoeval-eval-mathemakitten__winobias_antistereotype_test_cot_v3-math-237e7b-2016766699
[ "autotrain", "evaluation", "region:us" ]
2022-11-07T17:08:50+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["mathemakitten/winobias_antistereotype_test_cot_v3"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-66b_eval", "metrics": [], "dataset_name": "mathemakitten/winobias_antistereotype_test_cot_v3", "dataset_config": "mathemakitten--winobias_antistereotype_test_cot_v3", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-07T19:44:52+00:00