sha (stringlengths 40) | text (stringlengths 0 to 13.4M) | id (stringlengths 2 to 117) | tags (list) | created_at (stringlengths 25) | metadata (stringlengths 2 to 31.7M) | last_modified (stringlengths 25) |
---|---|---|---|---|---|---|
d7bd1920cf7cbe1fa5d53d752ddf18e08d30edf2
|
# Dataset Card for "chunk_228"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
one-sec-cv12/chunk_228
|
[
"region:us"
] |
2023-04-13T01:25:18+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 22202359632.125, "num_examples": 231159}], "download_size": 18820040745, "dataset_size": 22202359632.125}}
|
2023-04-13T01:43:57+00:00
|
775afaf45c155d14a3cf5330f08b7a9c34b615e9
|
# Dataset Card for "arithmetic_2as_1to10"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
sethapun/arithmetic_2as_1to10
|
[
"region:us"
] |
2023-04-13T01:29:01+00:00
|
{"dataset_info": {"features": [{"name": "expression", "dtype": "string"}, {"name": "answer", "dtype": "int64"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "false", "1": "true"}}}}], "splits": [{"name": "train", "num_bytes": 54740, "num_examples": 2000}, {"name": "validation", "num_bytes": 10960, "num_examples": 400}], "download_size": 11744, "dataset_size": 65700}}
|
2023-04-13T01:29:03+00:00
|
0ef2e9bd1b18f47c4656a78bc848dd7e5e6b72a1
|
# Dataset Card for "arithmetic_2as_1to50"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
sethapun/arithmetic_2as_1to50
|
[
"region:us"
] |
2023-04-13T01:29:04+00:00
|
{"dataset_info": {"features": [{"name": "expression", "dtype": "string"}, {"name": "answer", "dtype": "int64"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "false", "1": "true"}}}}], "splits": [{"name": "train", "num_bytes": 57298, "num_examples": 2000}, {"name": "validation", "num_bytes": 11446, "num_examples": 400}], "download_size": 17085, "dataset_size": 68744}}
|
2023-04-13T01:29:06+00:00
|
2d3a1e20196b4693feb2a54ef34af8dc21378f16
|
# Dataset Card for "arithmetic_2as_1to100"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
sethapun/arithmetic_2as_1to100
|
[
"region:us"
] |
2023-04-13T01:29:07+00:00
|
{"dataset_info": {"features": [{"name": "expression", "dtype": "string"}, {"name": "answer", "dtype": "int64"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "false", "1": "true"}}}}], "splits": [{"name": "train", "num_bytes": 57764, "num_examples": 2000}, {"name": "validation", "num_bytes": 11544, "num_examples": 400}], "download_size": 19645, "dataset_size": 69308}}
|
2023-04-13T01:29:09+00:00
|
2487e8769a38894b616ecf8a79342aba080a30a0
|
# Dataset Card for "arithmetic_2as_1to500"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
sethapun/arithmetic_2as_1to500
|
[
"region:us"
] |
2023-04-13T01:29:09+00:00
|
{"dataset_info": {"features": [{"name": "expression", "dtype": "string"}, {"name": "answer", "dtype": "int64"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "false", "1": "true"}}}}], "splits": [{"name": "train", "num_bytes": 61120, "num_examples": 2000}, {"name": "validation", "num_bytes": 12238, "num_examples": 400}], "download_size": 26336, "dataset_size": 73358}}
|
2023-04-13T01:29:12+00:00
|
89d15d7b96bd7e242d270ae8866037b854dd053e
|
# Dataset Card for "arithmetic_2as_1to1000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
sethapun/arithmetic_2as_1to1000
|
[
"region:us"
] |
2023-04-13T01:29:12+00:00
|
{"dataset_info": {"features": [{"name": "expression", "dtype": "string"}, {"name": "answer", "dtype": "int64"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "false", "1": "true"}}}}], "splits": [{"name": "train", "num_bytes": 61582, "num_examples": 2000}, {"name": "validation", "num_bytes": 12344, "num_examples": 400}], "download_size": 28386, "dataset_size": 73926}}
|
2023-04-13T01:29:14+00:00
|
26652c3e5d8dc8b7525e860a6449cb548fd68075
|
# Dataset Card for "arithmetic_2md_1to5"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
sethapun/arithmetic_2md_1to5
|
[
"region:us"
] |
2023-04-13T01:31:42+00:00
|
{"dataset_info": {"features": [{"name": "expression", "dtype": "string"}, {"name": "answer", "dtype": "float64"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "false", "1": "true"}}}}], "splits": [{"name": "train", "num_bytes": 54000, "num_examples": 2000}, {"name": "validation", "num_bytes": 10800, "num_examples": 400}], "download_size": 9908, "dataset_size": 64800}}
|
2023-04-13T01:57:36+00:00
|
5e83466b7928e849f442424e481843c0189cf68a
|
# Dataset Card for "arithmetic_2md_1to10"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
sethapun/arithmetic_2md_1to10
|
[
"region:us"
] |
2023-04-13T01:31:45+00:00
|
{"dataset_info": {"features": [{"name": "expression", "dtype": "string"}, {"name": "answer", "dtype": "float64"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "false", "1": "true"}}}}], "splits": [{"name": "train", "num_bytes": 54660, "num_examples": 2000}, {"name": "validation", "num_bytes": 10960, "num_examples": 400}], "download_size": 13336, "dataset_size": 65620}}
|
2023-04-13T01:57:39+00:00
|
b405f19716e509dae3b9a932da544e073349a42b
|
# Dataset Card for "arithmetic_2md_1to50"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
sethapun/arithmetic_2md_1to50
|
[
"region:us"
] |
2023-04-13T01:31:48+00:00
|
{"dataset_info": {"features": [{"name": "expression", "dtype": "string"}, {"name": "answer", "dtype": "float64"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "false", "1": "true"}}}}], "splits": [{"name": "train", "num_bytes": 57262, "num_examples": 2000}, {"name": "validation", "num_bytes": 11468, "num_examples": 400}], "download_size": 25093, "dataset_size": 68730}}
|
2023-04-13T01:57:42+00:00
|
6c116964469180318b845c693008cd6bac651e86
|
# Dataset Card for "arithmetic_2md_1to100"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
sethapun/arithmetic_2md_1to100
|
[
"region:us"
] |
2023-04-13T01:31:51+00:00
|
{"dataset_info": {"features": [{"name": "expression", "dtype": "string"}, {"name": "answer", "dtype": "float64"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "false", "1": "true"}}}}], "splits": [{"name": "train", "num_bytes": 57712, "num_examples": 2000}, {"name": "validation", "num_bytes": 11550, "num_examples": 400}], "download_size": 29072, "dataset_size": 69262}}
|
2023-04-13T01:57:45+00:00
|
300de20eb2e90dc148250d36bdf04312b89f36b0
|
# Dataset Card for "arithmetic_2md_1to500"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
sethapun/arithmetic_2md_1to500
|
[
"region:us"
] |
2023-04-13T01:31:53+00:00
|
{"dataset_info": {"features": [{"name": "expression", "dtype": "string"}, {"name": "answer", "dtype": "float64"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "false", "1": "true"}}}}], "splits": [{"name": "train", "num_bytes": 61140, "num_examples": 2000}, {"name": "validation", "num_bytes": 12234, "num_examples": 400}], "download_size": 34855, "dataset_size": 73374}}
|
2023-04-13T01:57:48+00:00
|
564da5285487dfbaf7453183260a7c38382cce21
|
# Dataset Card for "arithmetic_2md_1to1000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
sethapun/arithmetic_2md_1to1000
|
[
"region:us"
] |
2023-04-13T01:31:56+00:00
|
{"dataset_info": {"features": [{"name": "expression", "dtype": "string"}, {"name": "answer", "dtype": "float64"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "false", "1": "true"}}}}], "splits": [{"name": "train", "num_bytes": 61528, "num_examples": 2000}, {"name": "validation", "num_bytes": 12316, "num_examples": 400}], "download_size": 36193, "dataset_size": 73844}}
|
2023-04-13T01:57:52+00:00
|
e73c30ddbb11c65285d06499d7fc6d6e7558d0de
|
# Dataset Card for "arithmetic_2all_1to5"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
sethapun/arithmetic_2all_1to5
|
[
"region:us"
] |
2023-04-13T01:58:31+00:00
|
{"dataset_info": {"features": [{"name": "expression", "dtype": "string"}, {"name": "answer", "dtype": "float64"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "false", "1": "true"}}}}], "splits": [{"name": "train", "num_bytes": 54000, "num_examples": 2000}, {"name": "validation", "num_bytes": 10800, "num_examples": 400}], "download_size": 10946, "dataset_size": 64800}}
|
2023-04-13T01:58:33+00:00
|
6610f14581f13045a313769bd6b37eb86621a28a
|
# Dataset Card for "arithmetic_2all_1to10"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
sethapun/arithmetic_2all_1to10
|
[
"region:us"
] |
2023-04-13T01:58:34+00:00
|
{"dataset_info": {"features": [{"name": "expression", "dtype": "string"}, {"name": "answer", "dtype": "float64"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "false", "1": "true"}}}}], "splits": [{"name": "train", "num_bytes": 54724, "num_examples": 2000}, {"name": "validation", "num_bytes": 10954, "num_examples": 400}], "download_size": 14967, "dataset_size": 65678}}
|
2023-04-13T01:58:36+00:00
|
33502d1b5cc2930d8b5c79bcd0ccb986c9843d0d
|
# Dataset Card for "arithmetic_2all_1to50"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
sethapun/arithmetic_2all_1to50
|
[
"region:us"
] |
2023-04-13T01:58:36+00:00
|
{"dataset_info": {"features": [{"name": "expression", "dtype": "string"}, {"name": "answer", "dtype": "float64"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "false", "1": "true"}}}}], "splits": [{"name": "train", "num_bytes": 57306, "num_examples": 2000}, {"name": "validation", "num_bytes": 11472, "num_examples": 400}], "download_size": 23365, "dataset_size": 68778}}
|
2023-04-13T01:58:39+00:00
|
a38ed13bed83d4a14fbce46c9cab6bdb3ed73a86
|
# Dataset Card for "arithmetic_2all_1to100"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
sethapun/arithmetic_2all_1to100
|
[
"region:us"
] |
2023-04-13T01:58:39+00:00
|
{"dataset_info": {"features": [{"name": "expression", "dtype": "string"}, {"name": "answer", "dtype": "float64"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "false", "1": "true"}}}}], "splits": [{"name": "train", "num_bytes": 57780, "num_examples": 2000}, {"name": "validation", "num_bytes": 11516, "num_examples": 400}], "download_size": 26097, "dataset_size": 69296}}
|
2023-04-13T01:58:41+00:00
|
64bb7a336049b7b9755bb6216306ee0b0a3ffbe7
|
# Dataset Card for "arithmetic_2all_1to500"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
sethapun/arithmetic_2all_1to500
|
[
"region:us"
] |
2023-04-13T01:58:42+00:00
|
{"dataset_info": {"features": [{"name": "expression", "dtype": "string"}, {"name": "answer", "dtype": "float64"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "false", "1": "true"}}}}], "splits": [{"name": "train", "num_bytes": 61220, "num_examples": 2000}, {"name": "validation", "num_bytes": 12228, "num_examples": 400}], "download_size": 32495, "dataset_size": 73448}}
|
2023-04-13T01:58:44+00:00
|
5465e62175b4131a5ca78cfd589b1c4d226986c4
|
# Dataset Card for "arithmetic_2all_1to1000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
sethapun/arithmetic_2all_1to1000
|
[
"region:us"
] |
2023-04-13T01:58:44+00:00
|
{"dataset_info": {"features": [{"name": "expression", "dtype": "string"}, {"name": "answer", "dtype": "float64"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "false", "1": "true"}}}}], "splits": [{"name": "train", "num_bytes": 61562, "num_examples": 2000}, {"name": "validation", "num_bytes": 12310, "num_examples": 400}], "download_size": 33850, "dataset_size": 73872}}
|
2023-04-13T01:58:47+00:00
|
cbc3958eb133d630c41dc12a2e7ddfa5bac384d5
|
Alejandrapulidoa/stocks
|
[
"language:en",
"finance",
"region:us"
] |
2023-04-13T02:32:03+00:00
|
{"language": ["en"], "tags": ["finance"]}
|
2023-04-13T02:36:31+00:00
|
|
f3bb372e57554d8da7aec6b8683f876e2d361ff1
|
AutoBG/AutoBG-Data
|
[
"license:cc-by-nc-sa-2.0",
"region:us"
] |
2023-04-13T02:51:52+00:00
|
{"license": "cc-by-nc-sa-2.0"}
|
2023-04-14T03:16:18+00:00
|
|
502cdb1bf8f6ba8d76122c966130d5c52bb44ced
|
# Dataset Card for "asr_capstone"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Sammarieo/asr_capstone
|
[
"region:us"
] |
2023-04-13T04:22:38+00:00
|
{"dataset_info": {"features": [{"name": "path", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 14641650.150537634, "num_examples": 74}, {"name": "test", "num_bytes": 4118361.8494623657, "num_examples": 19}], "download_size": 15110862, "dataset_size": 18760012.0}}
|
2023-04-14T03:19:11+00:00
|
b1052630a98f0fc4fa6db45c5f6c57cf8a841365
|
# Dataset Card for "Sinhala-400M"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Ransaka/Sinhala-400M
|
[
"task_categories:text-generation",
"task_categories:feature-extraction",
"size_categories:10M<n<100M",
"language:si",
"license:apache-2.0",
"region:us"
] |
2023-04-13T04:28:53+00:00
|
{"language": ["si"], "license": "apache-2.0", "size_categories": ["10M<n<100M"], "task_categories": ["text-generation", "feature-extraction"], "pretty_name": "Sinhala Large Scale Corpus", "dataset_info": {"features": [{"name": "text", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 2802808058.089643, "num_examples": 8854185}, {"name": "test", "num_bytes": 1201203543.9103568, "num_examples": 3794651}], "download_size": 1826451430, "dataset_size": 4004011602}}
|
2023-12-11T03:03:00+00:00
|
b7a0816b10826fbadfed7c6378c6d5db750f580b
|
Smoden/ALICE_IMAGE_DATASET
|
[
"license:cc-by-nc-4.0",
"region:us"
] |
2023-04-13T04:29:54+00:00
|
{"license": "cc-by-nc-4.0"}
|
2023-04-21T13:15:46+00:00
|
|
5a024015ac19d1321305939b88aac0b5a6e08825
|
### Dataset Summary
The first 10k rows of the scientific_papers["pubmed"] dataset, split 10:1:1 into train, validation, and test.
### Usage
```python
from datasets import load_dataset
train_dataset = load_dataset("ronitHF/pubmed-10k", split="train")
val_dataset = load_dataset("ronitHF/pubmed-10k", split="validation")
test_dataset = load_dataset("ronitHF/pubmed-10k", split="test")
```
|
ronitHF/pubmed-10k
|
[
"task_categories:summarization",
"size_categories:1K<n<10K",
"region:us"
] |
2023-04-13T04:43:38+00:00
|
{"size_categories": ["1K<n<10K"], "task_categories": ["summarization"], "pretty_name": "PubMed 10k"}
|
2023-04-16T17:33:15+00:00
|
66c2772660c18b2b6fd4af92a49e87d675faf039
|
# Dataset Card for "chat-data"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
0x70DA/chat-data
|
[
"region:us"
] |
2023-04-13T04:46:10+00:00
|
{"dataset_info": {"features": [{"name": "topic", "dtype": "string"}, {"name": "input", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 155665615.95775312, "num_examples": 147099}, {"name": "validation", "num_bytes": 17296885.042246886, "num_examples": 16345}], "download_size": 89370265, "dataset_size": 172962501.0}}
|
2023-04-13T04:57:14+00:00
|
ac4d8c30d458e1172b19275c099e3d2866b38f62
|
# Dataset Description
- **Blog:** https://www.databricks.com/blog/2023/04/12/dolly-first-open-commercially-viable-instruction-tuned-llm
- **Repo:** https://github.com/databrickslabs/dolly
# Databricks Dolly 15k Dataset with citations removed and in Alpaca Format
**NOTE**
This is a reupload of the Databricks dataset found [here](https://github.com/databrickslabs/dolly/tree/master/data), but modified to be in Alpaca format, and with the citation numbers removed.
This work is not my own, and all credit goes to Databricks.
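To make the Alpaca format concrete, here is a minimal sketch of rendering one record (`instruction`, `input`, `output`) into a training prompt. The template wording below is the commonly used Stanford Alpaca text and is an assumption, not something defined by this repository:
```python
# Sketch: render a record into the standard Alpaca prompt template.
# The template strings are assumed, not taken from this dataset.
def to_alpaca_prompt(record: dict) -> str:
    if record.get("input"):
        return (
            "Below is an instruction that describes a task, paired with an input "
            "that provides further context. Write a response that appropriately "
            "completes the request.\n\n"
            f"### Instruction:\n{record['instruction']}\n\n"
            f"### Input:\n{record['input']}\n\n"
            f"### Response:\n{record['output']}"
        )
    return (
        "Below is an instruction that describes a task. Write a response that "
        "appropriately completes the request.\n\n"
        f"### Instruction:\n{record['instruction']}\n\n"
        f"### Response:\n{record['output']}"
    )

print(to_alpaca_prompt({
    "instruction": "Name three primary colors.",
    "input": "",
    "output": "Red, yellow, and blue.",
}))
```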
# Dataset Overview
`databricks-dolly-15k` is a corpus of more than 15,000 records generated by thousands of Databricks employees to enable large language
models to exhibit the magical interactivity of ChatGPT. Databricks employees were invited to create prompt / response pairs in each of eight different instruction categories, including the seven outlined in the InstructGPT paper, as well as an open-ended free-form category. The contributors were instructed to avoid using information from any source on the web with the exception of Wikipedia (for particular subsets of instruction categories), and explicitly instructed to avoid using generative AI in formulating instructions or responses. Examples of each behavior were provided to motivate the
types of questions and instructions appropriate to each category.
Halfway through the data generation process, contributors were given the option of answering questions posed by other contributors. They were asked to rephrase the original question and only select questions they could be reasonably expected to answer correctly.
For certain categories contributors were asked to provide reference texts copied from Wikipedia. Reference text (indicated by the `context` field in the actual dataset) may contain bracketed Wikipedia citation numbers (e.g. `[42]`) which we recommend users remove for downstream applications.
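For anyone working from the original Databricks release (this reupload already has the citation numbers removed), a minimal sketch of stripping such markers with a plain regex; this is not an official Databricks utility:
```python
import re

def strip_citations(text: str) -> str:
    # Remove bracketed Wikipedia citation numbers such as "[42]".
    return re.sub(r"\[\d+\]", "", text)

print(strip_citations("Water boils at 100 degrees Celsius.[3][17]"))
# -> "Water boils at 100 degrees Celsius."
```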
# Intended Uses
While immediately valuable for instruction fine-tuning of large language models, as a corpus of human-generated instruction prompts this dataset also presents a valuable opportunity for synthetic data generation using the methods outlined in the Self-Instruct paper. For example, contributor-generated prompts could be submitted as few-shot examples to a large open language model to generate a corpus of millions of examples of instructions in each of the respective InstructGPT categories; a sketch of this idea follows the next paragraph.
Likewise, both the instructions and responses present fertile ground for data augmentation. A paraphrasing model might be used to restate each prompt or short response, with the resulting text associated with the respective ground-truth sample. Such an approach might provide a form of regularization on the dataset that could allow for more robust instruction-following behavior in models derived from these synthetic datasets.
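As a sketch of the few-shot idea mentioned above (the continuation wording is illustrative, not taken from the Self-Instruct paper):
```python
import random
from datasets import load_dataset

ds = load_dataset("c-s-ale/dolly-15k-instruction-alpaca-format", split="train")

# Sample a handful of human-written instructions to use as few-shot seeds.
seed_rows = random.sample(range(len(ds)), 5)
few_shot = "\n".join(f"{i + 1}. {ds[j]['instruction']}" for i, j in enumerate(seed_rows))

# A Self-Instruct-style continuation prompt; the exact wording is illustrative.
prompt = (
    "Below are examples of task instructions. Continue the list with new, "
    "diverse instructions in the same style.\n\n"
    f"{few_shot}\n6."
)
print(prompt)
```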
# Dataset
## Purpose of Collection
As part of our continuing commitment to open source, Databricks developed what is, to the best of our knowledge, the first open source, human-generated instruction corpus specifically designed to enable large language models to exhibit the magical interactivity of ChatGPT. Unlike other datasets that are limited to non-commercial use, this dataset can be used, modified, and extended for any purpose, including academic or commercial applications.
## Sources
- **Human-generated data**: Databricks employees were invited to create prompt / response pairs in each of eight different instruction categories.
- **Wikipedia**: For instruction categories that require an annotator to consult a reference text (information extraction, closed QA, summarization) contributors selected passages from Wikipedia for particular subsets of instruction categories. No guidance was given to annotators as to how to select the target passages.
## Annotator Guidelines
To create a record, employees were given a brief description of the annotation task as well as examples of the types of prompts typical of each annotation task. Guidelines were succinct by design to encourage a high task completion rate, possibly at the cost of rigorous compliance with an annotation rubric that concretely and reliably operationalizes the specific task. Caveat emptor.
The annotation guidelines for each of the categories are as follows:
- **Creative Writing**: Write a question or instruction that requires a creative, open-ended written response. The instruction should be reasonable to ask of a person with general world knowledge and should not require searching. In this task, your prompt should give very specific instructions to follow. Constraints, instructions, guidelines, or requirements all work, and the more of them the better.
- **Closed QA**: Write a question or instruction that requires a factually correct response based on a passage of text from Wikipedia. The question can be complex and can involve human-level reasoning capabilities, but should not require special knowledge. To create a question for this task, include both the text of the question as well as the reference text in the form.
- **Open QA**: Write a question that can be answered using general world knowledge or at most a single search. This task asks for opinions and facts about the world at large and does not provide any reference text for consultation.
- **Summarization**: Give a summary of a paragraph from Wikipedia. Please don't ask questions that will require more than 3-5 minutes to answer. To create a question for this task, include both the text of the question as well as the reference text in the form.
- **Information Extraction**: These questions involve reading a paragraph from Wikipedia and extracting information from the passage. Everything required to produce an answer (e.g., a list, keywords, etc.) should be included in the passages. To create a question for this task, include both the text of the question as well as the reference text in the form.
- **Classification**: These prompts contain lists or examples of entities to be classified, e.g., movie reviews, products, etc. In this task the text or list of entities under consideration is contained in the prompt (i.e., there is no reference text). You can choose any categories for classification you like; the more diverse the better.
- **Brainstorming**: Think up lots of examples in response to a question asking to brainstorm ideas.
## Personal or Sensitive Data
This dataset contains public information (e.g., some information from Wikipedia). To our knowledge, it contains no personal identifiers of private individuals and no sensitive information.
## Language
American English
# Known Limitations
- Wikipedia is a crowdsourced corpus, and the contents of this dataset may reflect the biases, factual errors, and topical focus found in Wikipedia
- Some annotators may not be native English speakers
- Annotator demographics and subject matter may reflect the makeup of Databricks employees
# License/Attribution
**Copyright (2023) Databricks, Inc.**
This dataset was developed at Databricks (https://www.databricks.com) and its use is subject to the CC BY-SA 3.0 license.
Certain categories of material in the dataset include materials from the following sources, licensed under the CC BY-SA 3.0 license:
Wikipedia (various pages) - https://www.wikipedia.org/
Copyright © Wikipedia editors and contributors.
|
c-s-ale/dolly-15k-instruction-alpaca-format
|
[
"size_categories:10K<n<100K",
"language:en",
"license:cc-by-3.0",
"instruction",
"region:us"
] |
2023-04-13T05:01:38+00:00
|
{"language": ["en"], "license": "cc-by-3.0", "size_categories": ["10K<n<100K"], "pretty_name": "Databricks Dolly 15k (Alpaca format, citations removed)", "dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "category", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 12271354, "num_examples": 15015}], "download_size": 7801648, "dataset_size": 12271354}, "tags": ["instruction"]}
|
2023-04-13T05:08:38+00:00
|
db208c813091fc38a9d825debdcd53fe3cda4c3d
|
# Dataset Card for Nurburgring-J
## Dataset Description
- **Homepage:** [NurburgringJ Dataset Homepage](https://huggingface.co/kiriyamaX)
- **Repository:** [NurburgringJ Dataset Repository](https://huggingface.co/datasets/kiriyamaX/Nurburgring-J)
- **Paper:** NurburgringJ: A Dataset for Fine-Grained Vehicle Classification and Traffic Flow Analysis (to be published soon)
- **Point of Contact:** [NurburgringJ POC](mailto:[email protected])
|
kiriyamaX/Nurburgring-J
|
[
"license:bigscience-openrail-m",
"doi:10.57967/hf/0532",
"region:us"
] |
2023-04-13T05:37:35+00:00
|
{"license": "bigscience-openrail-m"}
|
2023-06-15T17:26:47+00:00
|
7b922180963e6cc74c0c6b66ba12e61b6e1e1be8
|
# Dataset Card for "java_unifiedbug_2_1"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
nguyenminh871/java_unifiedbug_2_1
|
[
"region:us"
] |
2023-04-13T05:41:55+00:00
|
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "func", "dtype": "string"}, {"name": "target", "dtype": {"class_label": {"names": {"0": true, "1": false}}}}, {"name": "project", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 10257684.819539886, "num_examples": 3349}, {"name": "test", "num_bytes": 3421270.213026591, "num_examples": 1117}, {"name": "validation", "num_bytes": 3421270.213026591, "num_examples": 1117}], "download_size": 7298469, "dataset_size": 17100225.245593067}}
|
2023-04-13T05:42:36+00:00
|
dbef4694737cdf5716f3fda309fcb3d7aeaa0cdb
|
# Roi'adan V'anzey Lycoris
[Support us on Ko-fi](https://ko-fi.com/Z8Z8L4EO)
WE ARE PROUDLY SPONSORED BY: https://www.piratediffusion.com/
JULY IS PLURAL PRIDE MONTH. You all know who you are, and you shall fear no longer: you have space on CivitAI just as much as everyone else. Our goal is to create niche safe spaces for those like us. If you're not plural or neurodivergent, that's ok LOL, you're welcome to support us and just download and enjoy our content!
If you want to learn more, please go here: https://thepluralassociation.org/ and support us, because we're being fake-claimed into oblivion for "not being ashamed".
Never be ashamed if you have quirks.
JOIN THE DISCORD AND DEMAND THINGS OF US:
https://discord.gg/5t2kYxt7An
JOIN OUR SUBREDDIT: https://www.reddit.com/r/earthndusk/
Listen to the music that we've made that goes with our art:
https://open.spotify.com/playlist/00R8x00YktB4u541imdSSf?si=b60d209385a74b38
MODEL AND LORA REQUEST FORM: https://forms.gle/aZNw9E78yfmSDnxdA
|
EarthnDusk/Roiadan_Vanzey_Lycoris
|
[
"task_categories:text-to-image",
"size_categories:1K<n<10K",
"language:en",
"license:creativeml-openrail-m",
"lora",
"lycoris",
"locon",
"region:us"
] |
2023-04-13T06:16:25+00:00
|
{"language": ["en"], "license": "creativeml-openrail-m", "size_categories": ["1K<n<10K"], "task_categories": ["text-to-image"], "pretty_name": "Roiadan Vanzey Lycoris", "tags": ["lora", "lycoris", "locon"]}
|
2023-08-01T05:55:43+00:00
|
b92bea335b8cac447ca070ffc70b36bdc86c3430
|
# Dataset Card for "docvqa_1000_examples"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
prashanthpillai/docvqa_1000_examples
|
[
"region:us"
] |
2023-04-13T06:21:31+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "image", "dtype": "image"}, {"name": "query", "struct": [{"name": "de", "dtype": "string"}, {"name": "en", "dtype": "string"}, {"name": "es", "dtype": "string"}, {"name": "fr", "dtype": "string"}, {"name": "it", "dtype": "string"}]}, {"name": "answers", "sequence": "string"}, {"name": "words", "sequence": "string"}, {"name": "bounding_boxes", "sequence": {"sequence": "float32", "length": 4}}, {"name": "answer", "struct": [{"name": "match_score", "dtype": "float64"}, {"name": "matched_text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "text", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 381430038.0, "num_examples": 1000}, {"name": "test", "num_bytes": 70769538.0, "num_examples": 200}], "download_size": 148367971, "dataset_size": 452199576.0}}
|
2023-04-13T06:21:59+00:00
|
6cd8dce3384d73f760a5421f7e698f0949279ed8
|
# NorPaca Norwegian Bokmål
This dataset is a translation into Norwegian Bokmål of [alpaca_gpt4_data.json](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM), a clean version of the [Alpaca dataset made at Stanford](https://huggingface.co/datasets/tatsu-lab/alpaca) with the outputs generated by GPT-4.
# Prompt used to generate the dataset
```
Du blir bedt om å komme opp med et sett med 20 forskjellige oppgaveinstruksjoner. Disse oppgaveinstruksjonene vil bli gitt til en GPT-modell, og vi vil evaluere GPT-modellen for å fullføre instruksjonene.
Her er kravene:
1. Prøv å ikke gjenta verbet for hver instruksjon for å maksimere mangfoldet.
2. Språket som brukes til undervisningen bør også være mangfoldig. For eksempel bør du kombinere spørsmål med imperative instruksjoner.
3. Type instruksjoner bør være mangfoldig. Listen bør inneholde forskjellige typer oppgaver som åpen generering, klassifisering, redigering, etc.
2. En GPT-språkmodell skal kunne fullføre instruksjonen. For eksempel, ikke be assistenten om å lage visuell eller lydutgang. For et annet eksempel, ikke be assistenten om å vekke deg klokken 17.00 eller angi en påminnelse fordi den ikke kan utføre noen handling.
3. Instruksjonene skal være på norsk.
4. Instruksjonene skal være 1 til 2 setninger lange. Enten en imperativ setning eller et spørsmål er tillatt.
5. Du bør generere et passende input til instruksjonen. Inndatafeltet skal inneholde et spesifikt eksempel gitt for instruksjonen. Det bør involvere realistiske data og bør ikke inneholde enkle plassholdere. Innspillet bør gi betydelig innhold for å gjøre instruksjonen utfordrende, men bør ideelt sett ikke overstige 100 ord.
6. Ikke alle instruksjoner krever inndata. For eksempel, når en instruksjon spør om noen generell informasjon, "hva er den høyeste toppen i verden", er det ikke nødvendig å gi en spesifikk kontekst. I dette tilfellet legger vi ganske enkelt "<noinput>" i inntastingsfeltet.
7. Utgangen skal være et passende svar på instruksjonen og input.Sørg for at utgangen er mindre enn 100 ord.
Liste over 200 instrukser:
```
|
MasterThesisCBS/NorPaca
|
[
"task_categories:text-generation",
"language:no",
"language:nb",
"license:cc-by-4.0",
"instruction-finetuning",
"region:us"
] |
2023-04-13T07:16:47+00:00
|
{"language": ["no", "nb"], "license": "cc-by-4.0", "task_categories": ["text-generation"], "pretty_name": "NB Alpaca Norwegian Bokm\u00e5l", "tags": ["instruction-finetuning"], "dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "prompt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 54356020, "num_examples": 50961}, {"name": "test", "num_bytes": 1113587, "num_examples": 1041}], "download_size": 28514339, "dataset_size": 55469607}}
|
2023-04-14T06:09:06+00:00
|
95af874cf03d6a4afeec6960b07c9425e05eb4c0
|
This dataset was created by automatically translating "databricks-dolly-15k" into Japanese.
It is licensed under CC-BY-SA-3.0.
Last update: 2023-05-11
databricks-dolly-15k-ja
https://github.com/kunishou/databricks-dolly-15k-ja
databricks-dolly-15k
https://github.com/databrickslabs/dolly/tree/master/data
|
kunishou/databricks-dolly-15k-ja
|
[
"license:cc-by-sa-3.0",
"region:us"
] |
2023-04-13T07:31:08+00:00
|
{"license": "cc-by-sa-3.0"}
|
2023-09-10T12:47:12+00:00
|
2f4853c15597e96e18386609869381cdccb92f07
|
# Dataset Card for Genshin Voice
## Dataset Description
### Dataset Summary
The Genshin Voice dataset is a text-to-voice dataset of different Genshin Impact characters unpacked from the game.
### Languages
The text in the dataset is in Mandarin.
## Dataset Creation
### Source Data
#### Initial Data Collection and Normalization
The data was obtained by unpacking the [Genshin Impact](https://genshin.hoyoverse.com/) game.
#### Who are the source language producers?
The language producers are employees of [Hoyoverse](https://hoyoverse.com/) and contractors from [EchoSky Studio](http://qx.asiacu.com/).
### Annotations
The dataset contains official annotations from the game, including in-game speaker names and transcripts.
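As a usage sketch, one might load the dataset with the Hugging Face `datasets` library and filter by speaker; the streaming flag avoids downloading the full ~17 GB archive up front, and the speaker name below is purely illustrative:
```python
from datasets import load_dataset

# Stream the training split instead of downloading the whole archive.
ds = load_dataset("hanamizuki-ai/genshin-voice-v3.5-mandarin", split="train", streaming=True)

# Keep only lines attributed to one speaker; "派蒙" (Paimon) is an illustrative value.
for sample in ds.filter(lambda s: s["npcName"] == "派蒙").take(3):
    print(sample["npcName"], sample["text"])
```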
## Additional Information
### Dataset Curators
The dataset was created by [w4123](https://github.com/w4123) initially in his [GitHub repository](https://github.com/w4123/GenshinVoice).
### Licensing Information
Copyright © COGNOSPHERE. All Rights Reserved.
|
hanamizuki-ai/genshin-voice-v3.5-mandarin
|
[
"task_categories:text-to-speech",
"task_categories:automatic-speech-recognition",
"multilinguality:monolingual",
"source_datasets:original",
"language:zh",
"region:us"
] |
2023-04-13T07:33:45+00:00
|
{"language": ["zh"], "multilinguality": ["monolingual"], "source_datasets": ["original"], "task_categories": ["text-to-speech", "automatic-speech-recognition"], "pretty_name": "Genshin Voice", "dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "language", "dtype": "string"}, {"name": "npcName", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "type", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 33310846721.498, "num_examples": 67921}], "download_size": 17251924784, "dataset_size": 33310846721.498}}
|
2023-04-13T13:47:16+00:00
|
3fc79783aa7694c0f4030c34a59e4ffa69b5df50
|
# AutoTrain Dataset for project: colors-1
## Dataset Description
This dataset has been automatically processed by AutoTrain for project colors-1.
### Languages
The BCP-47 code for the dataset's language is unk.
## Dataset Structure
### Data Instances
A sample from this dataset looks as follows:
```json
[
{
"image": "<1920x1080 P PIL image>",
"target": 1
},
{
"image": "<1000x563 RGB PIL image>",
"target": 1
}
]
```
### Dataset Fields
The dataset has the following fields (also called "features"):
```json
{
"image": "Image(decode=True, id=None)",
"target": "ClassLabel(names=['blue', 'green', 'red'], id=None)"
}
```
### Dataset Splits
This dataset is split into a train and validation split. The split sizes are as follows:
| Split name | Num samples |
| ------------ | ------------------- |
| train | 10 |
| valid | 3 |
|
AiBototicus/autotrain-data-colors-1
|
[
"task_categories:image-classification",
"region:us"
] |
2023-04-13T07:43:29+00:00
|
{"task_categories": ["image-classification"]}
|
2023-04-13T07:48:36+00:00
|
dafebfd38d222e5c1e4e5fcc0cbad8bd8e7f81b5
|
年份 政策因素 与徐工创新的关系
1997 所得税优惠政策:江苏省人民政府办公厅同意公司作为江苏省高新技术企业,执行15%所得税率。公司所得税按应纳税所得额的33%计征,对超过15%部分由财政返还,实际税负为15%。所得税返还部分进入公司净利润,增加可供分配利润。 这些税收优惠政策使徐工作为高新技术企业享受到较低的所得税税率,从而降低了公司的税收负担,有助于提高公司的净利润。增加的可供分配利润可以进一步用于公司的研发投入,促进徐工的技术创新。
1997 B股股份政策:公司发行B股后,B股股份超过25%,并经外经贸部批准后,可转为外商投资股份有限公司,并享受国家给予外商投资企业的相关优惠政策。 这一政策使徐工有机会通过发行B股增加资金来源,以满足公司扩张和创新的需求。此外,若B股股份超过25%并转为外商投资股份有限公司,徐工还能享受到国家给予外商投资企业的优惠政策,进一步降低成本,增强公司的竞争力和创新能力。
1998 国家扩大内需政策以及西部大开发战略的确立 国家通过执行扩大内需的积极政策以及确立西部大开发战略,进一步刺激了国内市场的需求,为徐工提供了良好的市场机遇。这种宏观环境的变化有助于徐工的主营业务发展,使公司有更多的资源和资金投入到技术创新和研发中,从而提升徐工的产品竞争力和市场份额。此外,西部大开发战略的推进可能会带来基建项目的增加,从而提升徐工工程机械设备的需求,刺激公司加大创新力度,以满足不同项目的需求。
2000 国家继续执行扩大内需政策以及西部大开发进入实施阶段 国家通过继续执行扩大内需的积极政策以及将西部大开发推进到实施阶段,进一步刺激了国内市场的需求,为徐工提供了良好的市场机遇。这种宏观环境的变化有助于徐工的主营业务发展,使公司有更多的资源和资金投入到技术创新和研发中,从而提升徐工的产品竞争力和市场份额。特别是西部大开发的实施,可能会带来基建项目的增加,从而提升徐工工程机械设备的需求,刺激公司加大创新力度,以满足不同项目的需求。
2002 国家继续实施积极的财政政策,依靠扩大内需刺激国民经济的发展,国内工程机械行业普遍增长,西部开发政策影响下销售量增长 国家通过实施积极的财政政策以及扩大内需来刺激国民经济发展,在此背景下,国内工程机械生产企业克服加入WTO带来的冲击,积极迎接市场挑战,行业出现普遍增长。徐工可借助这一宏观环境的有利条件,实现公司主营业务的增长。此外,开发西部政策的影响下,徐工的销售量有望实现较大增长,公司可在年底时加大生产以满足市场需求。这为徐工提供了更多的资源和资金投入到技术创新和研发中,从而提升徐工的产品竞争力和市场份额。
2002 企业所得税政策调整,母公司和子公司的适用税率为33%,公司所得税先按应纳税所得额的33%计征,再给予18%的财政返还 调整后的企业所得税政策对徐工的税收负担产生了影响。虽然母公司和子公司的适用税率为33%,但是通过财政返还的方式,实际税负得到了一定的缓解。这有助于降低公司的税收负担,为公司提供更多的资金用于研发和技术创新,从而进一步提升徐工的竞争力。
2003 国家坚持扩大内需方针,实施积极财政政策和稳健货币政策,工程机械市场保持强劲增长 国家通过坚持扩大内需的方针以及实施积极的财政政策和稳健的货币政策,促使全社会固定资产投资快速增长,进而拉动工程机械市场保持强劲增长的态势,为徐工提供了良好的市场机遇。在这种宏观环境的支持下,徐工的生产和销售实现了显著增长,主营业务收入和利润总额均实现了增长。这为徐工提供了更多的资源和资金投入到技术创新和研发中,从而提升徐工的产品竞争力和市场份额。
2003 原材料涨价、产品降价、税负增加、金融机构调整信贷、保险政策 2003年,由于原材料(如钢材、橡胶、油品)价格持续大幅上涨,同时行业竞争激烈导致产品价格下滑,公司的盈利空间受到挤压。此外,所得税先征后返政策的取消导致税负增加,而金融机构调整信贷、保险政策导致按揭销售业务暂停,给徐工的生产经营带来一定困难。这些因素对徐工的经营成果产生了一定影响,也使得公司面临更大的经营风险。因此,徐工需要加大技术创新和研发力度,提升产品质量和性能,以应对市场的挑战,并积极寻求合作机会、降低成本以提升盈利能力。
2004 国家宏观经济调控政策影响,工程机械市场波澜起伏,固定资产投资回落,市场需求增长减缓,产品供需矛盾突出,原材料价格上涨,电力紧缺 2004年,受国家宏观经济调控政策的影响,国内工程机械市场经历了波澜起伏的巨幅波动,市场需求增长减缓,产品供需矛盾突出,竞争异常激烈。钢材、橡胶、油品等原材料价格持续上涨以及电力紧缺等问题给徐工的生产经营带来了巨大挑战。在这种情况下,徐工需要更加注重技术创新和研发,提升产品质量和性能,以应对市场的挑战。同时,徐工可通过降低成本、提升生产效率、优化产品结构等方式,增强公司的竞争力和抵御市场风险的能力。此外,由于市场需求增长减缓,徐工可考虑开拓新的市场领域和拓展新的业务模式,以实现公司的持续发展。
2005 国家宏观经济调控政策,控制固定资产投资增长幅度,金融适度紧缩政策,工程机械市场需求量锐减 2005年,受国家宏观经济调控政策的影响,固定资产投资增长幅度受到控制,金融采取适度紧缩政策,导致工程机械市场需求量锐减。由于市场需求减少,行业内部竞争加剧,而且市场集中度进一步加强。面对这种局面,徐工采取了积极应对措施,其中之一是抓住国际市场机遇,加快国际化进程,通过推动产品升级、优化出口流程、提高产品质量、加大服务力度等措施大力开拓海外市场。实现外销收入同比增长147.62%,出口中高新技术产品占50%以上。这也意味着徐工在产品创新和技术升级方面取得了一定的成效,这有助于提升企业的整体竞争力和市场份额。
2005 “十一五”期间国家投资力度将超过“十五”期间,重大基础设施建设和城市化建设带动工程机械市场需求向好 虽然2005年徐工面临市场需求锐减的挑战,但国家在“十一五”期间的投资力度将超过“十五”期间,特别是交通运输业、水利、电力、能源基地建设、城市化建设、国家生态环境建设及国防建设等领域的投资,为工程机械产品的市场发展提供了良好的机遇。这意味着未来徐工有望借助国家基础设施建设的有利政策,实现市场的快速恢复和增长。因此,徐工应继续加大技术创新和研发力度,提升产品质量和性能,以应对市场的挑战,并积极抓住国家基础设施建设带来的市场机遇
2006 “十一五”期间国家重点工程项目的陆续开工、城市化、新农村建设对工程机械需求推动较大,出口成为拉动全行业快速增长的主要因素 2006年,“十一五”期间,由于国家重点工程项目的陆续开工、城市化进程以及新农村建设的推进,这些因素都对工程机械的需求产生了积极的推动作用。同时,世界经济的持续发展也为我国工程机械产品的出口创造了有利条件,出口成为了拉动整个工程机械行业快速增长的主要因素。在这种情况下,徐工有机会通过创新提升产品的质量、性能和品种,以满足国内外市场的需求,并进一步拓展国际市场份额。
2006 工程机械行业的发展趋势:注重品种、质量、效益,节能环保,自主创新,行业竞争全方位转向品牌、质量、服务 2006年,工程机械行业的发展趋势逐渐从注重生产数量转向注重产品品种、质量和效益,同时行业也开始注重节能和环保,以及自主创新,不断增强国际竞争力。行业竞争的重点也从价格竞争转向品牌、质量、服务等全方位竞争。因此,徐工作为工程机械行业的重要企业,应积极适应这些趋势,加大研发投入,提升产品品质,强化品牌形象,提升服务质量,从而提升企业的核心竞争力,并在行业内保持领先地位。
2007 “十一五”期间国家重点工程项目的陆续开工、城市化、新农村建设对工程机械需求推动较大,出口成为拉动全行业快速增长的主要因素 2007年,“十一五”期间,由于国家重点工程项目的陆续开工、城市化进程以及新农村建设的推进,这些因素都对工程机械的需求产生了积极的推动作用。同时,世界经济的持续发展也为我国工程机械产品的出口创造了有利条件,出口成为了拉动整个工程机械行业快速增长的主要因素。在这种情况下,徐工有机会通过创新提升产品的质量、性能和品种,以满足国内外市场的需求,并进一步拓展国际市场份额。
2007 工程机械行业的发展趋势:注重品种、质量、效益,节能环保,自主创新,行业竞争全方位转向品牌、质量、服务 2007年,工程机械行业的发展趋势逐渐从注重生产数量转向注重产品品种、质量和效益,同时行业也开始注重节能和环保,以及自主创新,不断增强国际竞争力。行业竞争的重点也从价格竞争转向品牌、质量、服务等全方位竞争。因此,徐工作为工程机械行业的重要企业,应积极适应这些趋势,加大研发投入,提升产品品质,强化品牌形象,提升服务质量,从而提升企业的核心竞争力,并在行业内保持领先地位。
2008 2008年上半年市场需求旺盛,工程机械内销及出口增长迅速;下半年受全球性金融危机影响,国内和国际市场需求锐减 2008年中国工程机械市场经历了冰火两重天的状态。上半年市场需求旺盛,工程机械内销及出口增长迅速,这为徐工带来了良好的市场机遇,有利于企业通过创新提升产品性能和质量,拓展市场份额。然而,下半年受到全球性金融危机的影响,国内和国际市场需求锐减,徐工可能面临市场需求减少、产能过剩、市场竞争激烈等挑战。在此背景下,徐工需要加强自主创新能力,优化产品结构,提升产品品质和服务水平,以增强企业的抗风险能力。
2008 工程机械行业装载机产品市场竞争激烈;压路机、摊铺机产品得益于出口销量增长 2008年,装载机产品市场历经多年的爆发式增长,各主要企业产能大幅度提升,导致市场竞争激烈。在这种环境下,徐工作为工程机械制造企业之一,需要通过创新提升装载机产品的性能、品质和服务,以应对激烈的市场竞争。此外,压路机、摊铺机等筑路机械产品销量略有提升,主要得益于出口销量的增长。这提示徐工有机会通过加大对外贸易力度,拓展国际市场,增加压路机、摊铺机等筑路机械产品的销量。
2009 国家宏观政策上预计不会使投资骤减,国家 4 万亿的投资,特别是在铁路、公路、城镇化建设等方面的投资,仍将会保持较大的投资增幅;工程机械行业自 2009 年初逐步回暖,加之工程机械租赁销售业务的迅速发展。 2009年,国际金融危机影响下,国际市场缓慢复苏,外部市场不稳固。为刺激经济,国家实施了一系列宏观政策,包括4万亿的投资计划,特别是在基础设施建设领域的投资。这些政策带动了工程机械行业的增长,并拉动了公司的营业收入。未来我国工程机械行业的发展将从注重数量转向注重品种、质量、效益,注重节能与环保,注重自主创新,不断增强竞争力。在这样的趋势下,徐工需加大创新力度,提升产品质量与效益,强化品牌和服务,以适应行业竞争的全方位转变,增强企业竞争力1
2008 国家四万亿投资计划等一系列刺激经济措施及政策的出台 2008年,由于国际金融危机的影响,工程机械行业处于发展周期的低点,而徐工作为该行业的企业也受到了影响。然而,随着国家四万亿投资计划等一系列刺激经济措施及政策的出台,工程机械行业的预期发生了变化,行业自2009年初逐步回暖。这些政策措施的出台有助于提振行业信心,刺激了工程机械租赁销售业务的迅速发展,从而促使徐工2010年度营业收入较预测大幅上升。此外,这些政策措施也为徐工提供了更多的市场机会,促使徐工积极进行技术研发和创新。
2010 7 个项目获政府立项资助 2010年,徐工共有7个科研项目获得了政府立项资助,这些资金资助为徐工提供了更多的研发经费,有助于推动科研项目的实施和发展。这些资助的项目包括步履式山地挖掘机、QAY500全地面起重机、智能化路面施工机械开发应用等。这些项目的实施不仅帮助徐工提升了科技水平,还有助于增强其市场竞争力,推动了公司在工程机械领域的创新发展。
2010 1 个项目列入国家火炬计划 徐工有1个科研项目被列入国家火炬计划,该计划是国家级的科技创新项目,旨在支持高新技术产业的发展。被列入火炬计划意味着该项目具有较高的科技含量和市场潜力,也意味着项目能够获得国家的政策扶持和资金支持。这对于徐工进一步推动科技创新和提升核心竞争力具有积极意义,也有助于徐工在工程机械行业保持领先地位。
2011 政府科技政策支持 在2011年,徐工积极争取政府科技政策支持,获得财政资金近6000万元,申报省级以上科技项目108个,其中82个获得政府立项,其中“160至1200吨大型全地面起重机研发与产业化”项目获得国家重大科技成果转化项目无偿资金支持3500万元;“千吨级超大履带起重机”项目列入国家863计划项目。公司58个新产品被认定为省高新技术产品。公司主持起草了12项行业标准,新增授权专利339项,发明专利实现重要突破,获授权18项。这些政策支持有助于公司加大科技研发投入,提升技术创新能力,培养自主创新能力,推动产品升级和企业发展1
2012 国家高新技术企业认定及税收优惠政策 徐工及旗下子公司在2012年获得江苏省高新技术企业认定,包括徐州重型机械有限公司、徐州徐工随车起重机有限公司、徐州徐工筑路机械有限公司、徐州徐工特种工程机械有限公司、徐州徐工液压件有限公司和徐州徐工铁路装备有限公司。这些公司自2008年、2009年以及2012年起分别三年内享受国家高新技术企业15%的企业所得税优惠税率。这项政策减轻了公司的税收负担,使公司有更多的资金用于科研和技术创新,进一步提升公司的核心竞争力,推动企业持续健康发展1
2013 徐工集团及其子公司享受税收优惠,适用税率为15%,境外子公司适用当地所得税1 徐工集团及其子公司因为享受税收优惠,适用较低的企业所得税率,有助于减轻企业税负,为企业自主创新提供更多的资金支持。
2013 徐工集团及其子公司被认定为江苏省高新技术企业,享受税收优惠2 徐工集团及其子公司作为高新技术企业,能够享受税收优惠,有助于企业提升自主创新能力,促进科技创新发展。
2013 徐工集团自主创新硕果累累,国家认定企业技术中心排名上升,成功研发高技术产品3 徐工集团自主创新能力强,企业技术中心在全国排名持续上升,成功突破全球履带式起重机行业技术的局限,并实现全球首吊,取得了一系列创新成果,这有助于提升企业核心竞争力。
2014 徐工集团及其子公司享受税收优惠,适用税率为15%,境外子公司适用当地所得税1 徐工集团及其子公司因为享受税收优惠,适用较低的企业所得税率,有助于减轻企业税负,为企业自主创新提供更多的资金支持。
2014 徐工集团及其子公司被认定为江苏省高新技术企业,享受税收优惠2 徐工集团及其子公司作为高新技术企业,能够享受税收优惠,有助于企业提升自主创新能力,促进科技创新发展。
2014 徐工集团自主创新硕果累累,推出高端产品,突破市场创新3 徐工集团自主创新能力强,成功推出一批“三高一大”产品亮相2014年上海宝马展,包括全球首创轮履两用概念起重机等,取得了一系列创新成果,这有助于提升企业核心竞争力。
2015 徐工集团及其子公司享受税收优惠,适用税率为15%,境外子公司适用当地所得税1 徐工集团及其子公司因为享受税收优惠,适用较低的企业所得税率,有助于减轻企业税负,为企业自主创新提供更多的资金支持。
2015 徐工集团及其子公司被认定为江苏省高新技术企业,享受税收优惠2 徐工集团及其子公司作为高新技术企业,能够享受税收优惠,有助于企业提升自主创新能力,促进科技创新发展。
2015 徐工集团自主创新能力强,掌握核心技术超百项,获得授权有效专利超千项3 徐工集团自主创新能力强,掌握大量核心技术,拥有众多有效专利,这有助于提升企业的技术实力和研发水平,进一步强化了徐工在工程机械行业的领先地位。
2016 徐工集团拥有引领行业的技术优势与研发能力1 徐工集团拥有技术优势,掌握的核心技术超过百项,获得授权有效专利超过千项,拥有全球协同的研发网络布局以及国内一流的工程机械研发机构。这表明徐工集团具备强大的自主创新能力和科研开发实力,这有助于提升企业的核心竞争力。
2016 徐工集团实现核心技术产品研发的重大突破2 徐工集团在2016年实现了核心技术产品研发的重大突破,推出了多款新型高端主机和核心零部件,提升了产品的技术性能和质量。这些创新成果进一步提升了企业的核心竞争力,并强化了徐工在工程机械行业的领先地位。
2016 徐工集团启动互联网+行动,加快建设工业云平台和全球物联网平台3 徐工集团通过启动互联网+行动,推进工业云平台的建设和全球物联网平台的初步建设,实现了信息技术与工程机械的深度融合。这有助于企业提升智能化水平,优化销售服务,提高管理效率,进一步提升企业的核心竞争力和市场地位。
2017 徐工集团拥有行业领先的技术创新能力1 徐工集团面向世界科技前沿、经济主战场和国家重大需求,致力于掌握工程机械各产业产品的全球科技竞争先机,打造“技术领先、用不毁”的高端产品群。公司拥有一系列国家级、省级研发平台和创新中心,并累计授权专利4493件,其中发明专利1046件,从而有效打造了行业领先的技术研发优势。这些因素有助于徐工集团持续推动技术创新、提升产品性能,进一步强化徐工在工程机械行业的领先地位2
2017 公司面向世界科技前沿、面向经济主战场、面向国家重大需求,打造“技术领先、用不毁”的高端产品群 徐工2017年研发投入占营业收入比例为5.51%,一半以上用于关键核心技术研究、重大实验设备设施建设等,推动技术创新和高端产品研发。公司依托徐工集团国家级技术中心和企业国家重点实验室2个研发平台,4个省级工程技术研究中心,5个省级企业技术中心,1个省级制造业创新中心,以及徐工国家级博士后科研工作站、院士工作站等打造世界级的创新体系1
2010-2017 公司获得多项科技进步奖,获得中国专利金奖,推出全球首创的起重机和消防车等标志性产品 徐工自主研制的创新产品在市场上取得了良好的表现,获得了一系列国家级科技进步奖和中国专利金奖的认可。公司成功研制出全球首创最大起重能力达88000吨.米的履带起重机、全球首创轮履两用概念起重机、全球第一高度JP80举高喷射消防车等,引领中国高端制造2
2014-2017 公司获得“全国质量奖”,持续保持全国质量奖荣誉称号 徐工在深入推行卓越绩效模式及质量经营管理创新做法和取得成效方面得到了全国质量奖评审专家组的肯定和高度评价,这有助于提升公司品质核心竞争力3
2017 公司加强军民融合发展,成立军品研究所,加紧实施无人操控、全自动变速箱等重大研发项目 徐工具有军工资质,全资子公司徐工重型具有武器装备科研生产许可证等军工资质,公司坚定实施军民融合战略,始终保持军用工程机械行业第一位,加紧全新军品开发;被总书记誉为“钢铁螳螂”的军工产品山地挖掘机获部队超亿元订单。这有助于提升公司在军工领域的市场份额并推动技术创新1
2017 公司持续推进质量2020-用不毁提升工程,新一代重大创新产品加快走向市场 徐工推进质量2020-用不毁提升工程,使行动金标准上升为公司的质量方针,推动产品质量水平不断迈上新台阶。新一代重大创新产品加快走向市场,例如“全球第一吊”4000吨履带起重机完成近百次吊装,全球首台八轴1200吨全地面起重机交付大客户备受认可。这有助于提升公司产品的竞争力,并强化品牌形象2
2017 公司制订国家及行业标准78项,新增授权专利691项,拥有有效授权专利4493项、发明专利1041项,PCT国际专利19件取得国外授权 徐工在科技创新方面取得了显著成果,积极参与制订国家及行业标准,不断增加专利授权数量,其中包括国际专利授权。这表明徐工在技术创新方面具有较强的研发能力和知识产权保护能力,有助于维护公司的核心竞争优势3
2018 根据政府工作报告,一是区域发展将塑造新格局:推进京津冀协同发展、高标准雄安新区建设、粤港澳大湾区发展规划实施;二是新型城镇化提高质量:公共交通优先发展,便民服务设施健全建设、老旧小区改造,排涝管网、地下综合管廊建设 这些政策的实施将促使基础设施投资保持合理规模,作为投资拉动型的工程机械行业,徐工有望从这些区域发展和新型城镇化建设中获得更多的市场机会。为了适应市场需求,徐工需要继续加大技术创新力度,提升产品质量和性能,满足大型工程施工和城市基础设施建设的需要1
2018 1. 徐工持续加大研发投入,依托徐工集团拥有“高端工程机械智能制造”国家重点实验室、国家企业技术中心等,构建起科技创新系统,获得“国家企业技术中心2017-2018年评价结果”优秀。<br>2. 徐工承担国家“863计划”、国家科技支撑计划、国家重点研发等省级以上项目,累计拥有国内有效授权专利5111件,其中发明专利1282件。<br>3. 徐工实施军民融合战略,保持军用工程机械列装部队的行业第一位置。<br>4. 徐工智能制造重点围绕智能研发、智能工厂、智能服务、智能管理和模式创新推进,建设智能制造基地,应用大数据、云计算、5G等新一代信息技术。 1. 徐工通过持续加大研发投入,提升技术创新能力,实现了科技创新系统的持续提升,为徐工的产品和技术研发提供了强大支持。<br>2. 徐工通过参与国家级科研项目,不断取得科技成果和专利,提升了创新实力,并通过自主研发的高端设备填补了国内空白,引领中国高端制造。<br>3. 徐工在军民融合发展方面的战略布局,有助于拓展公司的业务领域,提升产品竞争力和市场份额。<br>4. 徐工通过智能制造的全面推进,实现生产过程的自动化、数字化和智能化,提升了生产效率和产品质量,为公司未来的可持续发展奠定了基础1
2019 国家级智能制造试点示范、国家工业互联网应用试点示范、江苏省首批智能工厂、国家智能制造标杆企业、国家大数据产业发展试点示范项目 徐工在2019年被国家部委、省工信厅评定为国家级智能制造试点示范、国家工业互联网应用试点示范、江苏省首批智能工厂。这些政策支持与认可有助于徐工加大智能制造和新一代信息技术的研究与应用,推动公司实现智能化生产、智能化研发、智能化服务和智能化管理。此外,徐工全资子公司徐工重型荣获工信部颁发的国家智能制造标杆企业,这一荣誉成为行业唯一入选企业,彰显了徐工在智能制造领域的领先地位。徐工的“基于价值链运营增值的企业大数据创新应用项目”荣获2020年国家大数据产业发展试点示范项目,表明公司在大数据应用方面得到了国家的认可和支持,有助于公司在大数据创新应用方面取得更多突破1
2020 增值税政策与税率(6%、9%、13%) 影响徐工销售嵌入式软件产品的税负,对创新产品的销售收入有一定影响。
2020 城市维护建设税政策与税率(1%、5%、7%) 影响徐工的税负,可能影响公司的财务状况,进而影响公司的创新投入。
2020 企业所得税政策与税率(15%、25%) 对徐工及其子公司的税负产生影响,一些子公司因为被认定为高新技术企业享受15%的优惠税率,有助于公司的创新投入和研发活动。
2020 教育费附加政策与税率(5%) 影响徐工的税负,可能影响公司的财务状况,进而影响公司的创新投入。
2020 增值税即征即退政策 徐工销售嵌入式软件产品按适用税率征收增值税后,对其增值税实际税负超过3%的部分实行即征即退政策,降低税负,有利于公司创新和研发活动的投入。
2020 1. 增值税:应税收入,税率为6%、9%、13%<br>2. 城市维护建设税:应缴纳流转税额,税率为1%、5%、7%<br>3. 企业所得税:应纳税所得额,境内企业适用税率15%、25%<br>4. 教育费附加:应缴纳流转税额,税率为5%<br>5. 一些子公司被认定为高新技术企业,享受企业所得税优惠税率15% 对于被认定为高新技术企业的子公司,优惠的企业所得税税率有助于降低税收负担,进而鼓励企业进行技术创新和研发投入1
2021 1. 增值税:应税收入,税率为6%、9%、13%<br>2. 消费税:应缴纳流转税额,税率为1%、5%、7%<br>3. 企业所得税:应纳税所得额,境内企业适用税率15%、25%<br>4. 教育费附加:应缴纳流转税额,税率为5%<br>5. 一些子公司被认定为高新技术企业,享受企业所得税优惠税率15% 与2020年类似,被认定为高新技术企业的子公司享受优惠的企业所得税税率,有助于降低税收负担,进而鼓励企业进行技术创新和研发投入2
|
ssssasdasdasdasdqwd/data
|
[
"region:us"
] |
2023-04-13T08:01:27+00:00
|
{}
|
2023-04-13T08:02:01+00:00
|
d07e5265ea1ea189426813c42be71812ce11de59
|
# Dataset Card for Stable Diffusion v1.5 Glazed Samples
## Dataset Description
### Dataset Summary
This dataset contains image samples originally generated by [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5)
and subsequently processed by the [Glaze](https://glaze.cs.uchicago.edu/) tool.
### Supported Tasks and Leaderboards
[More Information Needed]
### Languages
[More Information Needed]
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
[More Information Needed]
### Contributions
[More Information Needed]
|
hanamizuki-ai/stable-diffusion-v1-5-glazed
|
[
"task_categories:image-classification",
"task_categories:image-to-image",
"license:creativeml-openrail-m",
"art",
"region:us"
] |
2023-04-13T08:02:43+00:00
|
{"license": "creativeml-openrail-m", "task_categories": ["image-classification", "image-to-image"], "tags": ["art"], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "parent_id", "dtype": "string"}, {"name": "model", "dtype": "string"}, {"name": "prompt", "dtype": "string"}, {"name": "glaze_model", "dtype": "string"}, {"name": "glaze_intensity", "dtype": "int64"}, {"name": "glaze_render", "dtype": "int64"}, {"name": "glaze_style", "dtype": "string"}, {"name": "glaze_style_strength", "dtype": "float64"}, {"name": "image", "dtype": "image"}, {"name": "parent_image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 111462286297.0, "num_examples": 118980}], "download_size": 23365392724, "dataset_size": 111462286297.0}}
|
2023-04-14T02:57:57+00:00
|
382043441d0327645f622cb39c1dea8ea95c93ba
|
# Dataset Card for Dataset Name
## Dataset Description
- **Homepage:**
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**
### Dataset Summary
This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1).
### Supported Tasks and Leaderboards
[More Information Needed]
### Languages
[More Information Needed]
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
[More Information Needed]
### Contributions
[More Information Needed]
|
JoeJYu/SexismCategory
|
[
"region:us"
] |
2023-04-13T08:10:36+00:00
|
{}
|
2023-04-13T09:17:35+00:00
|
9bf7b07e9a0144dbae451bccc3dbeea710a9a9fe
|
# Splice
The [Splice dataset](https://archive-beta.ics.uci.edu/dataset/69/molecular+biology+splice+junction+gene+sequences) from the [UCI repository](https://archive-beta.ics.uci.edu/).
# Configurations and tasks
| **Configuration** | **Task** |
|-------------------|---------------------------|
| splice | Multiclass classification |
| splice_EI | Binary classification |
| splice_IE | Binary classification |
| splice_N | Binary classification |
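A minimal usage sketch, assuming each configuration name is passed as the second argument to `load_dataset` and that the repository exposes a `train` split (the same pattern should apply to the other `mstz/*` UCI datasets below):
```python
from datasets import load_dataset

# Load the binary EI-junction configuration; the "train" split name is an assumption.
splice_ei = load_dataset("mstz/splice", "splice_EI", split="train")
print(splice_ei[0])
```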
|
mstz/splice
|
[
"task_categories:tabular-classification",
"size_categories:1K<n<10K",
"language:en",
"license:cc",
"splice",
"tabular_classification",
"binary_classification",
"multiclass_classification",
"UCI",
"region:us"
] |
2023-04-13T08:16:09+00:00
|
{"language": ["en"], "license": "cc", "size_categories": ["1K<n<10K"], "task_categories": ["tabular-classification"], "pretty_name": "Splice", "tags": ["splice", "tabular_classification", "binary_classification", "multiclass_classification", "UCI"], "configs": ["splice", "splice_EI", "splice_IE", "splice_N"]}
|
2023-04-16T17:03:01+00:00
|
b51fc2e7a015cff09909ccc8355875326dd8d87e
|
# Dataset Card for "databricks-dolly-15k-es-deepl"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
dvilasuero/databricks-dolly-15k-es-deepl
|
[
"region:us"
] |
2023-04-13T08:20:32+00:00
|
{"dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "response", "dtype": "string"}, {"name": "category", "dtype": "string"}, {"name": "instruction_en", "dtype": "string"}, {"name": "context_en", "dtype": "string"}, {"name": "response_en", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 25838910, "num_examples": 15015}], "download_size": 16464221, "dataset_size": 25838910}}
|
2023-04-13T09:28:31+00:00
|
29e3c7bec4a836e2bec32cf06dd31223f6e8399c
|
# Dataset Card for "donut_trial"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
shrey9669/donut_trial
|
[
"region:us"
] |
2023-04-13T08:30:38+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "ground_truth", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1761988.0, "num_examples": 3}, {"name": "test", "num_bytes": 424265.0, "num_examples": 1}, {"name": "validation", "num_bytes": 429924.0, "num_examples": 1}], "download_size": 1866897, "dataset_size": 2616177.0}}
|
2023-04-13T08:34:22+00:00
|
87fd1d34d76a46739643e0b92aec9b5d02d77255
|
# Nursery
The [Nursery dataset](https://archive-beta.ics.uci.edu/dataset/76/nursery) from the [UCI repository](https://archive-beta.ics.uci.edu/).
Should the nursery school accept the student application?
# Configurations and tasks
| **Configuration** | **Task** |
|-------------------|---------------------------|
| nursery | Multiclass classification |
| nursery_binary | Binary classification |
|
mstz/nursery
|
[
"task_categories:tabular-classification",
"size_categories:1K<n<10K",
"language:en",
"license:cc",
"nursery",
"tabular_classification",
"UCI",
"region:us"
] |
2023-04-13T08:32:14+00:00
|
{"language": ["en"], "license": "cc", "size_categories": ["1K<n<10K"], "task_categories": ["tabular-classification"], "pretty_name": "Nursery", "tags": ["nursery", "tabular_classification", "UCI"], "configs": ["nursery", "nursery_binary"]}
|
2023-04-16T16:57:18+00:00
|
5468eba8d97ab53079ea33499b4018669b1057a6
|
# Dataset Card for "chunk_230"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
one-sec-cv12/chunk_230
|
[
"region:us"
] |
2023-04-13T08:43:22+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 19038346416.875, "num_examples": 198217}], "download_size": 16862095514, "dataset_size": 19038346416.875}}
|
2023-04-13T08:55:23+00:00
|
a88c84970cdaff6f0452f9ef68cad70572dda839
|
# Dataset Card for "chunk_229"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
one-sec-cv12/chunk_229
|
[
"region:us"
] |
2023-04-13T08:46:45+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 17427141216.75, "num_examples": 181442}], "download_size": 14290629007, "dataset_size": 17427141216.75}}
|
2023-04-13T08:55:47+00:00
|
3d0153566b9f7e5dc18e99ba89b7b0fc1a537152
|
# PageBlocks
The [PageBlocks dataset](https://archive-beta.ics.uci.edu/dataset/76/page_blocks) from the [UCI repository](https://archive-beta.ics.uci.edu/).
How many transitions does the page block have?
# Configurations and tasks
| **Configuration** | **Task** |
|-------------------|---------------------------|
| page_blocks | Multiclass classification |
| page_blocks_binary| Binary classification |
|
mstz/page_blocks
|
[
"task_categories:tabular-classification",
"size_categories:1K<n<10K",
"language:en",
"license:cc",
"page_blocks",
"tabular_classification",
"binary_classification",
"multiclass_classification",
"region:us"
] |
2023-04-13T08:52:40+00:00
|
{"language": ["en"], "license": "cc", "size_categories": ["1K<n<10K"], "task_categories": ["tabular-classification"], "pretty_name": "Page Blocks", "tags": ["page_blocks", "tabular_classification", "binary_classification", "multiclass_classification"], "configs": ["page_blocks", "page_blocks_binary"]}
|
2023-04-16T16:57:31+00:00
|
a6b8c27a2e9fc3612e9e1d59863da0df2973ec3d
|
# Dataset Card for "chunk_235"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
one-sec-cv12/chunk_235
|
[
"region:us"
] |
2023-04-13T08:53:29+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 22014777888.25, "num_examples": 229206}], "download_size": 20041888330, "dataset_size": 22014777888.25}}
|
2023-04-13T09:05:23+00:00
|
a544dda4af454459a7ebee6608722b27127ab38c
|
# Dataset Card for "chunk_233"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
one-sec-cv12/chunk_233
|
[
"region:us"
] |
2023-04-13T08:55:57+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 21135266352.875, "num_examples": 220049}], "download_size": 19423908394, "dataset_size": 21135266352.875}}
|
2023-04-13T09:10:05+00:00
|
ce073b91ff7ab66e22cef92dd188517bb95fe30c
|
alexwww94/SimCLUE
|
[
"license:other",
"region:us"
] |
2023-04-13T08:56:06+00:00
|
{"license": "other"}
|
2023-04-14T05:40:03+00:00
|
|
ae3dd5dbf7bc9b0af13c7cb790b7963445895b94
|
# Dataset Card for "databricks-dolly-15k-es-deepl"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
argilla/databricks-dolly-15k-es-deepl
|
[
"region:us"
] |
2023-04-13T09:30:14+00:00
|
{"dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "response", "dtype": "string"}, {"name": "category", "dtype": "string"}, {"name": "instruction_en", "dtype": "string"}, {"name": "context_en", "dtype": "string"}, {"name": "response_en", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 25838910, "num_examples": 15015}], "download_size": 16464221, "dataset_size": 25838910}}
|
2023-04-13T09:30:19+00:00
|
8d8fd5d1dae907e2d73a5398926941bb64795fa7
|
# Post Operative
The [PostOperative dataset](https://archive-beta.ics.uci.edu/dataset/82/post+operative+patient) from the [UCI repository](https://archive-beta.ics.uci.edu/).
Should the patient be sent home, to the general hospital floor, or to the ICU?
# Configurations and tasks
| **Configuration** | **Task** |
|-----------------------|---------------------------|
| post_operative | Multiclass classification.|
| post_operative_binary | Binary classification. |
|
mstz/post_operative
|
[
"task_categories:tabular-classification",
"size_categories:1K<n<10K",
"language:en",
"license:cc",
"post_operative",
"tabular_classification",
"binary_classification",
"multiclass_classification",
"UCI",
"region:us"
] |
2023-04-13T09:31:10+00:00
|
{"language": ["en"], "license": "cc", "size_categories": ["1K<n<10K"], "task_categories": ["tabular-classification"], "pretty_name": "Page Blocks", "tags": ["post_operative", "tabular_classification", "binary_classification", "multiclass_classification", "UCI"], "configs": ["post_operative", "post_operative_binary"]}
|
2023-04-16T16:58:06+00:00
|
9b275d8930a69147df3f2fdfc870daa24e18dbbd
|
# Dataset Card for "autotrain-data-imgtestadv1"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
abhishek/autotrain-data-imgtestadv1
|
[
"region:us"
] |
2023-04-13T09:37:17+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "daisy", "1": "dandelion", "2": "rose", "3": "sunflower", "4": "tulip"}}}}], "splits": [{"name": "train", "num_bytes": 114899554.104, "num_examples": 2196}, {"name": "validation", "num_bytes": 33595969.0, "num_examples": 550}], "download_size": 167066023, "dataset_size": 148495523.104}}
|
2023-04-13T09:37:58+00:00
|
47417be9f8359d5db27bd9a0d264a5be53044c3c
|
# Seeds
The [Seeds dataset](https://archive-beta.ics.uci.edu/dataset/236/seeds) from the [UCI repository](https://archive-beta.ics.uci.edu/).
# Configurations and tasks
| **Configuration** | **Task** | **Description** |
|-----------------------|---------------------------|-------------------------|
| seeds | Multiclass classification.| |
| seeds_0 | Binary classification. | Is the seed of class 0? |
| seeds_1 | Binary classification. | Is the seed of class 1? |
| seeds_2 | Binary classification. | Is the seed of class 2? |
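Each binary configuration targets a single class; a minimal loading sketch (configuration names as in the table above):
```python
from datasets import load_dataset

multiclass = load_dataset("mstz/seeds", "seeds")
seeds_1 = load_dataset("mstz/seeds", "seeds_1")  # binary: is the seed of class 1?
```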
|
mstz/seeds
|
[
"task_categories:tabular-classification",
"size_categories:1K<n<10K",
"language:en",
"license:cc",
"seeds",
"tabular_classification",
"binary_classification",
"multiclass_classification",
"UCI",
"region:us"
] |
2023-04-13T09:55:57+00:00
|
{"language": ["en"], "license": "cc", "size_categories": ["1K<n<10K"], "task_categories": ["tabular-classification"], "pretty_name": "Page Blocks", "tags": ["seeds", "tabular_classification", "binary_classification", "multiclass_classification", "UCI"], "configs": ["seeds", "seeds_binary"]}
|
2023-04-16T16:58:19+00:00
|
991aac7d57bdda84710f809604e6f5c504d200e8
|
# Dataset Card for "amazon_tokenized"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
guangyil/amazon_tokenized
|
[
"region:us"
] |
2023-04-13T10:12:52+00:00
|
{"dataset_info": {"features": [{"name": "bert_token", "sequence": "int64"}, {"name": "gpt2_token", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 173553456.7202345, "num_examples": 551455}, {"name": "test", "num_bytes": 261864.0, "num_examples": 1000}], "download_size": 42652803, "dataset_size": 173815320.7202345}}
|
2023-04-13T10:13:18+00:00
|
78d1abecb96e7e01aa6f5fb5ffc900142f8a7ce4
|
fagenorn/cuco-dataset
|
[
"task_categories:text-to-image",
"annotations_creators:machine-generated",
"language_creators:other",
"multilinguality:monolingual",
"size_categories:n<1K",
"language:en",
"region:us"
] |
2023-04-13T10:15:18+00:00
|
{"annotations_creators": ["machine-generated"], "language_creators": ["other"], "language": ["en"], "multilinguality": ["monolingual"], "size_categories": ["n<1K"], "task_categories": ["text-to-image"], "task_ids": [], "pretty_name": "CuCo Style", "tags": []}
|
2023-04-13T19:58:42+00:00
|
|
ba4bf730b0db798ba1d0d8868b7bb9a46b117903
|
# Dataset Card for "chunk_226"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
one-sec-cv12/chunk_226
|
[
"region:us"
] |
2023-04-13T10:18:03+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 21978087552.0, "num_examples": 228824}], "download_size": 18662581812, "dataset_size": 21978087552.0}}
|
2023-04-13T10:38:41+00:00
|
193dbeda49034b52a808869110c84e81cf7f9748
|
# Dataset Card for "chunk_234"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
one-sec-cv12/chunk_234
|
[
"region:us"
] |
2023-04-13T10:42:57+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 21436953120.25, "num_examples": 223190}], "download_size": 19519832702, "dataset_size": 21436953120.25}}
|
2023-04-13T11:03:08+00:00
|
5f466e5af11fb82ea610acc5cf983dcb3a025840
|
# Dataset Card for "databricks-dolly-15k-curated-multilingual"
A curated and multilingual version of the Databricks Dolly instructions dataset. It includes a programmatically and manually corrected version of the original `en` dataset. See below.
**STATUS**:
Currently, the original Dolly v2 English version has been curated by combining automatic processing and collaborative human curation using Argilla (~400 records have been manually edited and fixed). The following graph shows a summary of the number of edited fields.

## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage: https://huggingface.co/datasets/argilla/databricks-dolly-15k-multilingual/**
- **Repository: https://huggingface.co/datasets/argilla/databricks-dolly-15k-multilingual/**
- **Paper:**
- **Leaderboard:**
- **Point of Contact: [email protected], https://github.com/argilla-io/argilla**
### Dataset Summary
This dataset collection is a curated and machine-translated version of the `databricks-dolly-15k` [dataset](https://github.com/databrickslabs/dolly/tree/master/data) originally created by Databricks, Inc. in 2023.
The goal is to give practitioners a starting point for training open-source instruction-following models with better-quality English data and translated data beyond English. However, as the translation quality will not be perfect, we highly recommend dedicating time to curate and fix translation issues. Below we explain how to load the datasets into [Argilla for data curation and fixing](https://github.com/argilla-io/argilla). Additionally, we'll be improving the datasets made available here, with the help of different communities.
Currently, the original English version has been curated by combining automatic processing and collaborative human curation using Argilla (~400 records have been manually edited and fixed), as summarized in the graph above.
The main issues (likely many issues still remaining) are the following:
1. Some labelers misunderstood the usage of the `context` field. This `context` field is used as part of the prompt for instruction-tuning; in other works it's called `input` (e.g., Alpaca). The name `context` has likely led some labelers to use it to provide the full context of where they extracted the response. This is problematic for some types of tasks (summarization, closed-qa, or information-extraction) because sometimes the context is shorter than or unrelated to the summaries, or the information cannot be extracted from the context (closed-qa, information-extraction).
2. Some labelers misunderstood the way to give instructions for summarization or closed-qa. For example, they ask "Who is Thomas Jefferson?" and then provide a very long context and an equally long response.
We programmatically identified records with these potential issues and ran a campaign to fix them; as a result, more than 400 records have been adapted. See below for statistics:

As a result of this curation process, the content of the fields, measured in number of tokens, has been reduced, especially for the responses:

If you want to browse and curate your dataset with Argilla, you can:
1. [Duplicate this Space](https://huggingface.co/spaces/argilla/dolly-multilingual-curation/settings?duplicate=true). IMPORTANT: the Space's visibility needs to be Public, but you can set up your own password and API keys [following this guide](https://docs.argilla.io/en/latest/getting_started/installation/deployments/huggingface-spaces.html#setting-up-secret-environment-variables).
2. Set up two secrets: `HF_TOKEN` and `LANG` to indicate the language split.
3. Log in with `admin`/`12345678` and start browsing and labeling. Every 5 minutes the validations are stored on a Hub dataset in your personal HF space.
4. Please get in touch to contribute fixes and improvements to the source datasets.
There's one split per language:
```python
from datasets import load_dataset
# load all splits
load_dataset("argilla/databricks-dolly-15k-curated-multilingual")
# load the Spanish split
load_dataset("argilla/databricks-dolly-15k-curated-multilingual", split="es")
```
### Supported Tasks and Leaderboards
As described in the README of the original dataset, this dataset can be used for:
* Training LLMs
* Synthetic Data Generation
* Data Augmentation
### Languages
Currently: `es`, `fr`, `de`, `en`
Join the Argilla [Slack community](https://join.slack.com/t/rubrixworkspace/shared_invite/zt-whigkyjn-a3IUJLD7gDbTZ0rKlvcJ5g) if you want to help us include other languages.
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
There's one split per language:
```python
from datasets import load_dataset
# load all splits
load_dataset("argilla/databricks-dolly-15k-curated-multilingual")
# load the Spanish split
load_dataset("argilla/databricks-dolly-15k-curated-multilingual", split="es")
```
## Dataset Creation
These datasets were translated from the original English dataset using the DeepL API between the 13th and 14th of April 2023.
### Curation Logbook
* 28/04/23: Removed references from Wikipedia copy-pastes for 8,113 rows. Applied to the context and response fields with the following regex: `r'\[[\w]+\]'` (a sketch of this cleanup is shown below).
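For illustration, a minimal sketch of how that cleanup could be applied to a record (the record below is hypothetical; this is not the exact script used for the logbook entry):
```python
import re

# The regex from the 28/04/23 logbook entry: strips bracketed
# reference markers such as "[1]" left over from Wikipedia copy-pastes.
REFERENCE_PATTERN = re.compile(r"\[[\w]+\]")

def strip_references(text: str) -> str:
    return REFERENCE_PATTERN.sub("", text)

# Hypothetical record, for illustration only.
record = {
    "context": "Paris is the capital of France.[1]",
    "response": "The capital of France is Paris.[1]",
}
cleaned = {key: strip_references(value) for key, value in record.items()}
```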
### Source Data
#### Initial Data Collection and Normalization
Refer to the [original dataset](https://github.com/databrickslabs/dolly/tree/master/data) for more information.
#### Who are the source language producers?
[More Information Needed]
### Annotations
Annotations are planned but not performed yet.
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
This dataset can be used for any purpose, whether academic or commercial, under the terms of the [Creative Commons Attribution-ShareAlike 3.0 Unported License](https://creativecommons.org/licenses/by-sa/3.0/legalcode).
**Original dataset Owner: Databricks, Inc.**
### Citation Information
[More Information Needed]
|
argilla/databricks-dolly-15k-curated-multilingual
|
[
"task_categories:text-generation",
"task_categories:text2text-generation",
"size_categories:10K<n<100K",
"language:es",
"language:de",
"language:fr",
"license:cc-by-sa-3.0",
"machine-translated",
"instruction-following",
"region:us"
] |
2023-04-13T11:18:17+00:00
|
{"language": ["es", "de", "fr"], "license": "cc-by-sa-3.0", "size_categories": ["10K<n<100K"], "task_categories": ["text-generation", "text2text-generation"], "pretty_name": "Databrick Dolly Instructions Multilingual", "dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "response", "dtype": "string"}, {"name": "category", "dtype": "string"}, {"name": "instruction_original_en", "dtype": "string"}, {"name": "context_original_en", "dtype": "string"}, {"name": "response_original_en", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "de", "num_bytes": 25985140, "num_examples": 15015}, {"name": "en", "num_bytes": 24125109, "num_examples": 15015}, {"name": "es", "num_bytes": 25902709, "num_examples": 15015}, {"name": "fr", "num_bytes": 26704314, "num_examples": 15015}], "download_size": 65586669, "dataset_size": 102717272}, "tags": ["machine-translated", "instruction-following"]}
|
2023-06-14T06:47:54+00:00
|
5bd9bb0f9004af169abaf3abc62e0dbb9d641284
|
# Dataset Card for "chunk_225"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
one-sec-cv12/chunk_225
|
[
"region:us"
] |
2023-04-13T11:22:17+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 21203076240.625, "num_examples": 220755}], "download_size": 20183788050, "dataset_size": 21203076240.625}}
|
2023-04-13T11:40:50+00:00
|
816e4f708875e41fcb647d961623137b88161b86
|
# Dataset Card for "casual_prompts"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
shahules786/casual_prompts
|
[
"region:us"
] |
2023-04-13T11:39:11+00:00
|
{"dataset_info": {"features": [{"name": "context", "dtype": "string"}, {"name": "response", "dtype": "null"}, {"name": "safety_label", "dtype": "string"}, {"name": "episode_done", "dtype": "bool"}, {"name": "rots", "sequence": "string"}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3528106, "num_examples": 26445}], "download_size": 1867677, "dataset_size": 3528106}}
|
2023-04-16T07:30:18+00:00
|
08b70d0bb277e95ff3beb73f795be43e2fa0f4b6
|
# Dataset Card for "chunk_223"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
one-sec-cv12/chunk_223
|
[
"region:us"
] |
2023-04-13T11:43:07+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 23361658992.375, "num_examples": 243229}], "download_size": 22210053548, "dataset_size": 23361658992.375}}
|
2023-04-13T12:26:39+00:00
|
7f57cd4cbac63865609f04718f14acf43b888963
|
meowmeownig/meowdels
|
[
"license:creativeml-openrail-m",
"region:us"
] |
2023-04-13T11:49:34+00:00
|
{"license": "creativeml-openrail-m"}
|
2023-04-16T17:45:27+00:00
|
|
7f407c36a36294c69142f165edc22a8ade2f0749
|
# Dataset Card for "Bioasq7b"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
reginaboateng/Bioasq7b
|
[
"language:en",
"region:us"
] |
2023-04-13T11:55:34+00:00
|
{"language": "en", "dataset_info": {"features": [{"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "answers", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9973215.098861594, "num_examples": 6000}, {"name": "validation", "num_bytes": 1123648.9011384062, "num_examples": 676}], "download_size": 6069060, "dataset_size": 11096864.0}}
|
2023-07-13T12:55:58+00:00
|
3bbef28519d174906bf90604b05b371995be6c77
|
# Dataset Card for "Bioasq7b_list"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
reginaboateng/Bioasq7b_list
|
[
"region:us"
] |
2023-04-13T12:32:45+00:00
|
{"dataset_info": {"features": [{"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "answers", "struct": [{"name": "answer_start", "sequence": "int64"}, {"name": "text", "sequence": "string"}]}], "splits": [{"name": "train", "num_bytes": 14557028, "num_examples": 8598}], "download_size": 2877034, "dataset_size": 14557028}}
|
2023-04-13T12:32:47+00:00
|
4f9c11e50889e68475d51e3dc7c57e16cd8699f6
|
# Dataset Card for "Bioasq7b_factoid"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
reginaboateng/Bioasq7b_factoid
|
[
"region:us"
] |
2023-04-13T12:38:14+00:00
|
{"dataset_info": {"features": [{"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "answers", "struct": [{"name": "answer_start", "sequence": "int64"}, {"name": "text", "sequence": "string"}]}], "splits": [{"name": "train", "num_bytes": 8373638.251760881, "num_examples": 5000}, {"name": "validation", "num_bytes": 899328.7482391186, "num_examples": 537}], "download_size": 4489549, "dataset_size": 9272967.0}}
|
2023-04-13T12:38:18+00:00
|
0d9ade14f8419ee561d13dfae41834c2eb1610bf
|
# Dataset Card for "dolly_es_validation"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
dvilasuero/dolly_es_validation
|
[
"region:us"
] |
2023-04-13T12:45:55+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "null"}, {"name": "inputs", "struct": [{"name": "context", "dtype": "string"}, {"name": "instruction", "dtype": "string"}, {"name": "response", "dtype": "string"}]}, {"name": "prediction", "dtype": "null"}, {"name": "prediction_agent", "dtype": "null"}, {"name": "annotation", "dtype": "string"}, {"name": "annotation_agent", "dtype": "string"}, {"name": "vectors", "dtype": "null"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "struct": [{"name": "category", "dtype": "string"}, {"name": "context_en", "dtype": "string"}, {"name": "instruction_en", "dtype": "string"}, {"name": "response_en", "dtype": "string"}]}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 14166, "num_examples": 11}], "download_size": 0, "dataset_size": 14166}}
|
2023-04-18T08:31:00+00:00
|
60ee2d70a93d65e65845c51013079e4d4d253988
|
# Dataset Card for "solarModuleAnomaly"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
zklee98/solarModuleAnomaly
|
[
"region:us"
] |
2023-04-13T13:03:07+00:00
|
{"dataset_info": {"features": [{"name": "pixel_values", "dtype": "image"}, {"name": "label", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 629107.0, "num_examples": 142}], "download_size": 92735, "dataset_size": 629107.0}}
|
2023-04-14T07:19:45+00:00
|
127f35b982b584f02742ddfc99d866abb9731021
|
HyperionHF/tom
|
[
"license:mit",
"region:us"
] |
2023-04-13T14:29:02+00:00
|
{"license": "mit", "dataset_info": {"features": [{"name": "agent", "dtype": "string"}, {"name": "agent_preferences", "sequence": "string"}, {"name": "chosen_object", "dtype": "string"}, {"name": "true_chosen_object", "dtype": "string"}, {"name": "chosen_object_is_noisy", "dtype": "bool"}, {"name": "other_objects", "sequence": "string"}, {"name": "location_chosen_object", "dtype": "int64"}, {"name": "location_true_chosen_object", "dtype": "int64"}, {"name": "preference_idx", "dtype": "int64"}, {"name": "preference_idx_true", "dtype": "int64"}, {"name": "example_idx", "dtype": "int64"}, {"name": "example_text", "dtype": "string"}, {"name": "target_idx", "dtype": "int64"}, {"name": "logical_representation", "struct": [{"name": "input", "sequence": "string"}, {"name": "target", "dtype": "string"}]}, {"name": "logical_representation_with_indirection", "struct": [{"name": "input", "sequence": "string"}, {"name": "target", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 5701970, "num_examples": 13046}, {"name": "dev", "num_bytes": 576759, "num_examples": 1320}, {"name": "test_own_preference", "num_bytes": 63618, "num_examples": 144}, {"name": "test_others_preference", "num_bytes": 60138, "num_examples": 144}, {"name": "test_location", "num_bytes": 170682, "num_examples": 396}, {"name": "test_distraction", "num_bytes": 64125, "num_examples": 150}], "download_size": 702278, "dataset_size": 6637292}}
|
2023-04-27T00:24:51+00:00
|
|
93341cdc452ca5cb1c4a3e82b619ee5b2e847985
|
Dolly Japanese Translation: this repository is a Japanese translation of the dolly project developed by Databricks.
Translation source: the original project can be found at the following link:
Dolly (English version). License and attribution: Copyright (2023) Databricks, Inc. This dataset was developed at Databricks (https://www.databricks.com) and is licensed for use under the CC BY-SA 3.0 license.
Some categories of the dataset include material from the following sources, licensed under the CC BY-SA 3.0 license:
Wikipedia (various pages) - https://www.wikipedia.org/ Copyright © Wikipedia editors and contributors.
Because the original dolly project is published under CC BY-SA 3.0, this translated work is likewise published under CC BY-SA 3.0.
It is provided under the terms of the Creative Commons Attribution-ShareAlike 3.0 license.
As long as you follow the conditions below, you are free to:
Share: copy, distribute, display, and perform the work. Remix: adapt the work. The following conditions apply:
Attribution: you must attribute the work in the manner specified by the author or licensor. ShareAlike: if you alter, transform, or build upon this work, you must distribute the resulting work under the same license as this one. For details, please see the full text of the license.
|
takosama/databricks-dolly-15k-ja-google-trans
|
[
"size_categories:10K<n<100K",
"language:ja",
"license:cc-by-3.0",
"region:us"
] |
2023-04-13T14:38:17+00:00
|
{"language": ["ja"], "license": "cc-by-3.0", "size_categories": ["10K<n<100K"]}
|
2023-04-13T16:18:21+00:00
|
fdf72ae0827c1cda404aff25b6603abec9e3399b
|
# OpenAssistant Conversations Dataset (OASST1)
## Dataset Description
- **Homepage:** https://www.open-assistant.io/
- **Repository:** https://github.com/LAION-AI/Open-Assistant
- **Paper:** https://arxiv.org/abs/2304.07327
### Dataset Summary
In an effort to democratize research on large-scale alignment, we release OpenAssistant
Conversations (OASST1), a human-generated, human-annotated assistant-style conversation
corpus consisting of 161,443 messages in 35 different languages, annotated with 461,292
quality ratings, resulting in over 10,000 fully annotated conversation trees. The corpus
is a product of a worldwide crowd-sourcing effort involving over 13,500 volunteers.
Please refer to our [paper](https://arxiv.org/abs/2304.07327) for further details.
### Dataset Structure
This dataset contains message trees. Each message tree has an initial prompt message as the root node,
which can have multiple child messages as replies, and these child messages can have multiple replies.
All messages have a role property: this can either be "assistant" or "prompter". The roles in
conversation threads from prompt to leaf node strictly alternate between "prompter" and "assistant".
This version of the dataset contains data collected on the [open-assistant.io](https://open-assistant.io/) website until April 12, 2023.
### JSON Example: Message
For readability, the following JSON examples are shown formatted with indentation on multiple lines.
Objects are stored without indentation (on single lines) in the actual jsonl files.
```json
{
"message_id": "218440fd-5317-4355-91dc-d001416df62b",
"parent_id": "13592dfb-a6f9-4748-a92c-32b34e239bb4",
"user_id": "8e95461f-5e94-4d8b-a2fb-d4717ce973e4",
"text": "It was the winter of 2035, and artificial intelligence (..)",
"role": "assistant",
"lang": "en",
"review_count": 3,
"review_result": true,
"deleted": false,
"rank": 0,
"synthetic": true,
"model_name": "oasst-sft-0_3000,max_new_tokens=400 (..)",
"labels": {
"spam": { "value": 0.0, "count": 3 },
"lang_mismatch": { "value": 0.0, "count": 3 },
"pii": { "value": 0.0, "count": 3 },
"not_appropriate": { "value": 0.0, "count": 3 },
"hate_speech": { "value": 0.0, "count": 3 },
"sexual_content": { "value": 0.0, "count": 3 },
"quality": { "value": 0.416, "count": 3 },
"toxicity": { "value": 0.16, "count": 3 },
"humor": { "value": 0.0, "count": 3 },
"creativity": { "value": 0.33, "count": 3 },
"violence": { "value": 0.16, "count": 3 }
}
}
```
### JSON Example: Conversation Tree
For readability, only a subset of the message properties is shown here.
```json
{
"message_tree_id": "14fbb664-a620-45ce-bee4-7c519b16a793",
"tree_state": "ready_for_export",
"prompt": {
"message_id": "14fbb664-a620-45ce-bee4-7c519b16a793",
"text": "Why can't we divide by 0? (..)",
"role": "prompter",
"lang": "en",
"replies": [
{
"message_id": "894d30b6-56b4-4605-a504-89dd15d4d1c8",
"text": "The reason we cannot divide by zero is because (..)",
"role": "assistant",
"lang": "en",
"replies": [
// ...
]
},
{
"message_id": "84d0913b-0fd9-4508-8ef5-205626a7039d",
"text": "The reason that the result of a division by zero is (..)",
"role": "assistant",
"lang": "en",
"replies": [
{
"message_id": "3352725e-f424-4e3b-a627-b6db831bdbaa",
"text": "Math is confusing. Like those weird Irrational (..)",
"role": "prompter",
"lang": "en",
"replies": [
{
"message_id": "f46207ca-3149-46e9-a466-9163d4ce499c",
"text": "Irrational numbers are simply numbers (..)",
"role": "assistant",
"lang": "en",
"replies": []
},
// ...
]
}
]
}
]
}
}
```
Please refer to [oasst-data](https://github.com/LAION-AI/Open-Assistant/tree/main/oasst-data) for
details about the data structure and Python code to read and write jsonl files containing oasst data objects.
If you would like to explore the dataset yourself you can find a
[`getting-started`](https://github.com/LAION-AI/Open-Assistant/blob/main/notebooks/openassistant-oasst1/getting-started.ipynb)
notebook in the `notebooks/openassistant-oasst1` folder of the [LAION-AI/Open-Assistant](https://github.com/LAION-AI/Open-Assistant)
github repository.
## Main Dataset Files
Conversation data is provided either as nested messages in trees (extension `.trees.jsonl.gz`)
or as a flat list (table) of messages (extension `.messages.jsonl.gz`).
### Ready For Export Trees
```
2023-04-12_oasst_ready.trees.jsonl.gz 10,364 trees with 88,838 total messages
2023-04-12_oasst_ready.messages.jsonl.gz 88,838 messages
```
Trees in the `ready_for_export` state, with spam and deleted messages excluded, including message labels.
The `oasst_ready.trees` file is usually sufficient for supervised fine-tuning (SFT) and reward model (RM) training.
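A minimal sketch for reading one of these gzipped jsonl exports (each line is a single JSON object; the official reader lives in [oasst-data](https://github.com/LAION-AI/Open-Assistant/tree/main/oasst-data)):
```python
import gzip
import json

# Each line of the export is one JSON object (here, one conversation tree).
with gzip.open("2023-04-12_oasst_ready.trees.jsonl.gz", "rt", encoding="utf-8") as f:
    trees = [json.loads(line) for line in f]

print(len(trees), trees[0]["message_tree_id"])
```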
### All Trees
```
2023-04-12_oasst_all.trees.jsonl.gz 66,497 trees with 161,443 total messages
2023-04-12_oasst_all.messages.jsonl.gz 161,443 messages
```
All trees, including those in states `prompt_lottery_waiting` (trees that consist of only one message, namely the initial prompt),
`aborted_low_grade` (trees that stopped growing because the messages had low quality), and `halted_by_moderator`.
### Supplemental Exports: Spam & Prompts
```
2023-04-12_oasst_spam.messages.jsonl.gz
```
These are messages which were deleted or have a negative review result (`"review_result": false`).
Besides low quality, a frequent reason for message deletion is a wrong language tag.
```
2023-04-12_oasst_prompts.messages.jsonl.gz
```
These are all the retained initial prompt messages with a positive review result (i.e., not spam) from trees in the `ready_for_export` or `prompt_lottery_waiting` state.
### Using the Huggingface Datasets
While Hugging Face Datasets is ideal for tabular data, it is not a natural fit for nested data structures like the OpenAssistant conversation trees.
Nevertheless, we make all messages that can also be found in the file `2023-04-12_oasst_ready.trees.jsonl.gz` available in parquet as train/validation splits.
These are directly loadable by [Huggingface Datasets](https://pypi.org/project/datasets/).
To load the oasst1 train & validation splits use:
```python
from datasets import load_dataset
ds = load_dataset("OpenAssistant/oasst1")
train = ds['train'] # len(train)=84437 (95%)
val = ds['validation'] # len(val)=4401 (5%)
```
The messages appear in depth-first order of the message trees.
Full conversation trees can be reconstructed from the flat messages table by using the `parent_id`
and `message_id` properties to identify the parent-child relationship of messages. The `message_tree_id`
and `tree_state` properties (only present in flat messages files) can be used to find all messages of a message tree or to select trees by their state.
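As a minimal reconstruction sketch (assuming root prompts carry a null `parent_id`; the official tooling lives in [oasst-data](https://github.com/LAION-AI/Open-Assistant/tree/main/oasst-data)):
```python
from collections import defaultdict

from datasets import load_dataset

ds = load_dataset("OpenAssistant/oasst1", split="train")

# Index replies by their parent_id; root prompts have a null parent_id.
children = defaultdict(list)
roots = []
for msg in ds:
    if msg["parent_id"] is None:
        roots.append(msg)
    else:
        children[msg["parent_id"]].append(msg)

def build_tree(msg):
    # Recursively attach replies to rebuild one conversation tree.
    return {
        "text": msg["text"],
        "role": msg["role"],
        "replies": [build_tree(child) for child in children[msg["message_id"]]],
    }

tree = build_tree(roots[0])
```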
### Languages
OpenAssistant Conversations incorporates 35 different languages with a distribution of messages as follows:
**Languages with over 1000 messages**
- English: 71956
- Spanish: 43061
- Russian: 9089
- German: 5279
- Chinese: 4962
- French: 4251
- Thai: 3042
- Portuguese (Brazil): 2969
- Catalan: 2260
- Korean: 1553
- Ukrainian: 1352
- Italian: 1320
- Japanese: 1018
<details>
<summary><b>Languages with under 1000 messages</b></summary>
<ul>
<li>Vietnamese: 952</li>
<li>Basque: 947</li>
<li>Polish: 886</li>
<li>Hungarian: 811</li>
<li>Arabic: 666</li>
<li>Dutch: 628</li>
<li>Swedish: 512</li>
<li>Turkish: 454</li>
<li>Finnish: 386</li>
<li>Czech: 372</li>
<li>Danish: 358</li>
<li>Galician: 339</li>
<li>Hebrew: 255</li>
<li>Romanian: 200</li>
<li>Norwegian Bokmål: 133</li>
<li>Indonesian: 115</li>
<li>Bulgarian: 95</li>
<li>Bengali: 82</li>
<li>Persian: 72</li>
<li>Greek: 66</li>
<li>Esperanto: 59</li>
<li>Slovak: 19</li>
</ul>
</details>
## Contact
- Discord: [Open Assistant Discord Server](https://ykilcher.com/open-assistant-discord)
- GitHub: [LAION-AI/Open-Assistant](https://github.com/LAION-AI/Open-Assistant)
- E-Mail: [[email protected]](mailto:[email protected])
|
OpenAssistant/oasst1
|
[
"size_categories:100K<n<1M",
"language:en",
"language:es",
"language:ru",
"language:de",
"language:pl",
"language:th",
"language:vi",
"language:sv",
"language:bn",
"language:da",
"language:he",
"language:it",
"language:fa",
"language:sk",
"language:id",
"language:nb",
"language:el",
"language:nl",
"language:hu",
"language:eu",
"language:zh",
"language:eo",
"language:ja",
"language:ca",
"language:cs",
"language:bg",
"language:fi",
"language:pt",
"language:tr",
"language:ro",
"language:ar",
"language:uk",
"language:gl",
"language:fr",
"language:ko",
"license:apache-2.0",
"human-feedback",
"arxiv:2304.07327",
"region:us"
] |
2023-04-13T14:48:16+00:00
|
{"language": ["en", "es", "ru", "de", "pl", "th", "vi", "sv", "bn", "da", "he", "it", "fa", "sk", "id", "nb", "el", "nl", "hu", "eu", "zh", "eo", "ja", "ca", "cs", "bg", "fi", "pt", "tr", "ro", "ar", "uk", "gl", "fr", "ko"], "license": "apache-2.0", "size_categories": ["100K<n<1M"], "pretty_name": "OpenAssistant Conversations", "dataset_info": {"features": [{"name": "message_id", "dtype": "string"}, {"name": "parent_id", "dtype": "string"}, {"name": "user_id", "dtype": "string"}, {"name": "created_date", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "role", "dtype": "string"}, {"name": "lang", "dtype": "string"}, {"name": "review_count", "dtype": "int32"}, {"name": "review_result", "dtype": "bool"}, {"name": "deleted", "dtype": "bool"}, {"name": "rank", "dtype": "int32"}, {"name": "synthetic", "dtype": "bool"}, {"name": "model_name", "dtype": "string"}, {"name": "detoxify", "struct": [{"name": "toxicity", "dtype": "float64"}, {"name": "severe_toxicity", "dtype": "float64"}, {"name": "obscene", "dtype": "float64"}, {"name": "identity_attack", "dtype": "float64"}, {"name": "insult", "dtype": "float64"}, {"name": "threat", "dtype": "float64"}, {"name": "sexual_explicit", "dtype": "float64"}]}, {"name": "message_tree_id", "dtype": "string"}, {"name": "tree_state", "dtype": "string"}, {"name": "emojis", "sequence": [{"name": "name", "dtype": "string"}, {"name": "count", "dtype": "int32"}]}, {"name": "labels", "sequence": [{"name": "name", "dtype": "string"}, {"name": "value", "dtype": "float64"}, {"name": "count", "dtype": "int32"}]}], "splits": [{"name": "train", "num_bytes": 100367999, "num_examples": 84437}, {"name": "validation", "num_bytes": 5243405, "num_examples": 4401}], "download_size": 41596430, "dataset_size": 105611404}, "tags": ["human-feedback"]}
|
2023-05-02T12:21:21+00:00
|
b4292f42d873019a02b43f1f20be975d5b1fbf9e
|
# Dataset Card for "tax-convos-sample"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mdacampora/tax-convos-sample
|
[
"region:us"
] |
2023-04-13T15:08:59+00:00
|
{"dataset_info": {"features": [{"name": "customer", "struct": [{"name": "employee_id", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "social_security_number", "dtype": "string"}]}, {"name": "problem", "dtype": "string"}, {"name": "transcript", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 2443, "num_examples": 4}], "download_size": 8150, "dataset_size": 2443}}
|
2023-04-13T15:09:01+00:00
|
7411ecae10464a63e359bb736c3f65e949a18919
|
# Dataset Card for "socialmedia-abuse"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
darksam/socialmedia-abuse
|
[
"region:us"
] |
2023-04-13T15:24:29+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "neg", "1": "pos"}}}}], "splits": [{"name": "train", "num_bytes": 1074806, "num_examples": 8530}], "download_size": 698844, "dataset_size": 1074806}}
|
2023-04-13T15:24:31+00:00
|
7903c028cc139c64605fe1c72caccb0ec211ebb2
|
# Dataset Card for "socialmedia-abuse2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
darksam/socialmedia-abuse2
|
[
"region:us"
] |
2023-04-13T15:24:44+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "neg", "1": "pos"}}}}], "splits": [{"name": "train", "num_bytes": 1074806, "num_examples": 8530}], "download_size": 0, "dataset_size": 1074806}}
|
2023-04-13T15:24:52+00:00
|
8e34dcb28c865c316a1625d9d09deedf1b0f45e5
|
# Dataset Card for "chunk_241"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
one-sec-cv12/chunk_241
|
[
"region:us"
] |
2023-04-13T15:26:22+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 27418438368.75, "num_examples": 285466}], "download_size": 25539245911, "dataset_size": 27418438368.75}}
|
2023-04-13T15:45:15+00:00
|
20022500701bb15dd253d391fe84719896db0ea2
|
# Dataset Card for "chunk_239"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
one-sec-cv12/chunk_239
|
[
"region:us"
] |
2023-04-13T15:44:20+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 28079536752.375, "num_examples": 292349}], "download_size": 26155516370, "dataset_size": 28079536752.375}}
|
2023-04-13T16:09:27+00:00
|
06ada106ba54ce46f57e12af0868aa476d4398ef
|
# Dataset Card for "final_training_set_v1"
Finetuning datasets for [WangChanGLM](https://github.com/pythainlp/wangchanglm) sourced from [LAION OIG chip2 and infill_dbpedia](https://huggingface.co/datasets/laion/OIG) ([Apache-2.0](https://github.com/pythainlp/wangchanglm/blob/main/LICENSE)), [DataBricks Dolly v2](https://github.com/databrickslabs/dolly) ([Apache-2.0](https://github.com/pythainlp/wangchanglm/blob/main/LICENSE)), [OpenAI TL;DR](https://github.com/openai/summarize-from-feedback) ([MIT](https://opensource.org/license/mit/)), and [Hello-SimpleAI HC3](https://huggingface.co/datasets/Hello-SimpleAI/HC3) ([CC-BY SA](https://creativecommons.org/licenses/by-sa/4.0/))
|
pythainlp/final_training_set_v1
|
[
"task_categories:conversational",
"task_categories:text-generation",
"language:en",
"region:us"
] |
2023-04-13T15:52:49+00:00
|
{"language": ["en"], "task_categories": ["conversational", "text-generation"], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "metadata", "struct": [{"name": "source", "dtype": "string"}]}, {"name": "nb_token", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 337155434.9768474, "num_examples": 405760}, {"name": "test", "num_bytes": 1277960.0231525812, "num_examples": 1538}], "download_size": 191404581, "dataset_size": 338433395}}
|
2023-04-29T06:06:04+00:00
|
3d89f4206cca436863de11360d6df8d4aeae24ad
|
# Dataset Card for "riffdata-002"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
gafotech/riffdata-002
|
[
"region:us"
] |
2023-04-13T15:54:40+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2520348514.392, "num_examples": 20576}], "download_size": 2507840852, "dataset_size": 2520348514.392}}
|
2023-04-13T17:50:30+00:00
|
3fba1770d9b15172c82313a4a9a27e568384f498
|
# Dataset Card for "docvqa_train_and_val"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
prashanthpillai/docvqa_train_and_val
|
[
"region:us"
] |
2023-04-13T16:19:13+00:00
|
{"dataset_info": {"features": [{"name": "questionId", "dtype": "int64"}, {"name": "question", "dtype": "string"}, {"name": "image", "sequence": {"sequence": {"sequence": "uint8"}}}, {"name": "docId", "dtype": "int64"}, {"name": "ucsf_document_id", "dtype": "string"}, {"name": "ucsf_document_page_no", "dtype": "string"}, {"name": "answers", "sequence": "string"}, {"name": "data_split", "dtype": "string"}, {"name": "words", "sequence": "string"}, {"name": "boxes", "sequence": {"sequence": "int64"}}], "splits": [{"name": "val", "num_bytes": 869361798, "num_examples": 5349}, {"name": "train", "num_bytes": 6381793673, "num_examples": 39454}], "download_size": 2578887111, "dataset_size": 7251155471}}
|
2023-04-13T16:29:28+00:00
|
677719eb6653458279b9f47aacf54f3a6e61f55c
|
# Dataset Card for "docvqa_test"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
prashanthpillai/docvqa_test
|
[
"region:us"
] |
2023-04-13T16:29:28+00:00
|
{"dataset_info": {"features": [{"name": "questionId", "dtype": "int64"}, {"name": "question", "dtype": "string"}, {"name": "image", "sequence": {"sequence": {"sequence": "uint8"}}}, {"name": "docId", "dtype": "int64"}, {"name": "ucsf_document_id", "dtype": "string"}, {"name": "ucsf_document_page_no", "dtype": "string"}, {"name": "data_split", "dtype": "string"}, {"name": "words", "sequence": "string"}, {"name": "boxes", "sequence": {"sequence": "int64"}}], "splits": [{"name": "test", "num_bytes": 843083964, "num_examples": 5188}], "download_size": 296859136, "dataset_size": 843083964}}
|
2023-04-13T16:30:48+00:00
|
d8710d01cd531a401e6a38b185294ca28d11b0d4
|
# Dataset Card for "chunk_242"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
one-sec-cv12/chunk_242
|
[
"region:us"
] |
2023-04-13T16:39:46+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 18189474192.625, "num_examples": 189379}], "download_size": 15770036330, "dataset_size": 18189474192.625}}
|
2023-04-13T16:57:36+00:00
|
ac05d9d0c4ea2925e5c0a966f7f6ac33ef8da305
|
raquelh/wedding-pic
|
[
"license:afl-3.0",
"region:us"
] |
2023-04-13T16:52:43+00:00
|
{"license": "afl-3.0"}
|
2023-04-13T16:52:43+00:00
|
|
c116a99a055fcb830ce05f4df32bcfb5ad66da16
|
# To get access, you'll need to provide a compelling reason;
# you can contact me at: [email protected].
# This dataset is a work in progress for future GPT training.
Dataset built from the "large_all" subset of https://huggingface.co/datasets/poloclub/diffusiondb.
Filtered to remove:
- duplicate lines
- blank lines
- lines with a single word
- mathbold and similar Unicode styles
- http/s links
- unprintable/binary characters
- NUL characters
- runs of multiple whitespace characters
- special characters like: ( [ { @ # $ % < etc.
plus other small cleanup.
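For illustration, a rough re-implementation of these rules could look like the following (`prompts.txt` and the exact patterns are assumptions for the sketch, not the script used to build this dataset):
```python
import re

def keep_prompt(line: str) -> bool:
    # Hypothetical filter mirroring the rules above; Unicode-style
    # (mathbold) checks are omitted for brevity.
    if not line:                                   # blank lines
        return False
    if len(line.split()) < 2:                      # lines with a single word
        return False
    if re.search(r"https?://", line):              # http/s links
        return False
    if "\x00" in line or not line.isprintable():   # NUL / unprintable characters
        return False
    if re.search(r"[()\[\]{}@#$%<>]", line):       # special characters
        return False
    return True

def normalize(line: str) -> str:
    return re.sub(r"\s+", " ", line).strip()       # collapse whitespace runs

seen, prompts = set(), []
for raw in open("prompts.txt", encoding="utf-8"):  # hypothetical input file
    line = normalize(raw)
    if keep_prompt(line) and line not in seen:     # drop duplicate lines
        seen.add(line)
        prompts.append(line)
```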
|
davehornik/diffusionDB_filtered_prompts
|
[
"task_categories:tabular-classification",
"size_categories:1M<n<10M",
"language:en",
"license:mit",
"art",
"region:us"
] |
2023-04-13T16:54:19+00:00
|
{"language": ["en"], "license": "mit", "size_categories": ["1M<n<10M"], "task_categories": ["tabular-classification"], "pretty_name": "diffusionDB Prompts - 1,8mil", "tags": ["art"]}
|
2023-05-17T10:09:26+00:00
|
0fad2effd0500261f84eaccbb0b0b91a9a0be2f7
|
houck2040/agri
|
[
"license:mit",
"region:us"
] |
2023-04-13T16:56:18+00:00
|
{"license": "mit"}
|
2023-04-13T16:58:30+00:00
|
|
6dd5a2361d181a4cc6bd19d5a4bb917fb288be39
|
This is the Spanish version of Winogrande Small (640 instances), for training only.
The translation was done manually by a group of experts. The dataset will continue to be improved in the future.
We also acknowledge Somos-NLP for this achievement.
|
hackathon-somos-nlp-2023/winogrande_train_s_spanish
|
[
"task_categories:text-classification",
"size_categories:n<1K",
"language:es",
"license:gpl-3.0",
"region:us"
] |
2023-04-13T16:56:35+00:00
|
{"language": ["es"], "license": "gpl-3.0", "size_categories": ["n<1K"], "task_categories": ["text-classification"], "pretty_name": "Winogrande in Spanish"}
|
2023-04-14T18:40:59+00:00
|
27f87481c2fb973135eddc6120619af422bd2554
|
Dzeniks/fever-nei-wiki-based
|
[
"license:mit",
"region:us"
] |
2023-04-13T17:06:15+00:00
|
{"license": "mit"}
|
2023-04-13T17:36:19+00:00
|
|
0734cbf047170cfc688c400de0da08f58b487ea2
|
Dzeniks/fever-nei-no-wiki-based
|
[
"license:mit",
"region:us"
] |
2023-04-13T17:10:32+00:00
|
{"license": "mit"}
|
2023-04-13T17:35:12+00:00
|
|
78f4648db350558010ccf55b769e37da70d1e51e
|
# Lofi Hip Hop
A collection of royalty-free lofi hip hop MP3s for machine learning purposes.
|
jacksonkstenger/lofiHipHop
|
[
"language:en",
"region:us"
] |
2023-04-13T17:14:24+00:00
|
{"language": ["en"]}
|
2023-04-13T18:22:11+00:00
|
e782e63015f92b4b246aa24d1e383282edd0f18d
|
# Dataset Card for "chunk_238"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
one-sec-cv12/chunk_238
|
[
"region:us"
] |
2023-04-13T17:24:19+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 25267635504.875, "num_examples": 263073}], "download_size": 22472258365, "dataset_size": 25267635504.875}}
|
2023-04-13T18:04:20+00:00
|
8b80f561b4bbd692bc204031a7520aad51043fbe
|
# Dataset Card for "polish_names"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
marcus2000/polish_names
|
[
"region:us"
] |
2023-04-13T18:18:11+00:00
|
{"dataset_info": {"features": [{"name": "0", "dtype": "string"}, {"name": "1", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 13695, "num_examples": 572}, {"name": "test", "num_bytes": 1549, "num_examples": 64}], "download_size": 15128, "dataset_size": 15244}}
|
2023-04-13T18:40:05+00:00
|
259311caabfdf6de24016a32d5512d9c9f1e53ee
|
# Dataset Card
**Paper**: On the Challenges of Using Black-Box APIs for Toxicity Evaluation in Research
**Abstract**: Perception of toxicity evolves over time and often differs between geographies and cultural backgrounds. Similarly, black-box commercially available APIs for detecting toxicity, such as the Perspective API, are not static, but frequently retrained to address any unattended weaknesses and biases. We evaluate the implications of these changes on the reproducibility of findings that compare the relative merits of models and methods that aim to curb toxicity. Our findings suggest that research that relied on inherited automatic toxicity scores to compare models and techniques may have resulted in inaccurate findings. Rescoring all models from HELM, a widely respected living benchmark, for toxicity with the recent version of the API led to a different ranking of extensively used models. We suggest caution in applying apples-to-apples comparisons between studies and lay recommendations for a more structured approach to evaluating toxicity over time.
Published on the [Trustworthy and Reliable Large-Scale Machine Learning Models ICLR 2023 Workshop](https://rtml-iclr2023.github.io/cfp.html).
[[Code]](https://github.com/for-ai/black-box-api-challenges) [[OpenReview]](https://openreview.net/forum?id=bRDHL4J5vy) [[Extended Pre-print]]()
## Dataset Description
This repo contains the data from the paper "On the Challenges of Using Black-Box APIs for Toxicity Evaluation in Research".
In the folders you can find:
- **real-toxicity-prompts:** prompts from the RealToxicityPrompts dataset rescored with Perspective API in February 2023.
- **helm:** prompts and continuations from the HELM benchmark v0.2.2, rescored with Perspective API in April 2023. That folder also contains the original stats for each of the models, as scraped from the website.
- **dexperts:** prompts and continuations from a few models from the DExperts paper, rescored with Perspective API in February 2023.
- **uddia:** continuations from UDDIA models, rescored with Perspective API in February 2023.
### RealToxicityPrompts
RealToxicityPrompts is a dataset of 100k sentence snippets from the web for researchers to further address the risk of neural toxic degeneration in models.
- **Homepage:** [Toxic Degeneration homepage](https://toxicdegeneration.allenai.org/)
- **Repository:** [Code repository](https://github.com/allenai/real-toxicity-prompts)
- **Paper:** [RealToxicityPrompts: Evaluating Neural Toxic Degeneration in Language Models](https://arxiv.org/abs/2009.11462)
### HELM
- **Homepage:** [HELM Benchmark](https://crfm.stanford.edu/helm/latest/)
- **Repository:** [Code repository](https://github.com/stanford-crfm/helm)
- **Paper:** [Holistic Evaluation of Language Models](https://arxiv.org/abs/2211.09110)
### DExperts
- **Repository:** [Code repository](https://github.com/alisawuffles/DExperts)
- **Paper:** [DExperts: Decoding-Time Controlled Text Generation with Experts and Anti-Experts](https://arxiv.org/abs/2105.03023)
### UDDIA
- **Paper:** [Unified Detoxifying and Debiasing in Language Generation via Inference-time Adaptive Optimization](https://arxiv.org/abs/2210.04492)
# Citation
```
@inproceedings{
pozzobon2023on,
title={On the Challenges of Using Black-Box {API}s for Toxicity Evaluation in Research},
author={Luiza Amador Pozzobon and Beyza Ermis and Patrick Lewis and Sara Hooker},
booktitle={ICLR 2023 Workshop on Trustworthy and Reliable Large-Scale Machine Learning Models },
year={2023},
url={https://openreview.net/forum?id=bRDHL4J5vy}
}
```
|
CohereForAI/black-box-api-challenges
|
[
"task_categories:text-classification",
"task_categories:text-generation",
"language:en",
"license:apache-2.0",
"toxicity",
"text",
"nlp",
"fairness",
"arxiv:2009.11462",
"arxiv:2211.09110",
"arxiv:2105.03023",
"arxiv:2210.04492",
"region:us"
] |
2023-04-13T18:33:00+00:00
|
{"language": ["en"], "license": "apache-2.0", "task_categories": ["text-classification", "text-generation"], "pretty_name": "On the challenges of using black-box APIs for toxicity evaluation in research", "tags": ["toxicity", "text", "nlp", "fairness"]}
|
2023-04-19T16:58:52+00:00
|
e73fd078adb24011bf97c4374befa958d4a11ae4
|
# Dataset Card for "chunk_243"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
one-sec-cv12/chunk_243
|
[
"region:us"
] |
2023-04-13T18:35:26+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 19325433888.25, "num_examples": 201206}], "download_size": 16897007958, "dataset_size": 19325433888.25}}
|
2023-04-13T18:51:33+00:00
|
64aad26a4d957bac679bfae1346020e1679a3e96
|
juandi79/atenfart
|
[
"license:apache-2.0",
"region:us"
] |
2023-04-13T18:41:20+00:00
|
{"license": "apache-2.0"}
|
2023-04-13T18:51:05+00:00
|
|
930e1769650f667d4bc921b63320930986dea893
|
# Dataset Card for Dataset Name
## Dataset Description
- **Homepage:**
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**
### Dataset Summary
This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1).
### Supported Tasks and Leaderboards
[More Information Needed]
### Languages
[More Information Needed]
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
[More Information Needed]
### Contributions
[More Information Needed]
|
suschi1993/fomc-draft-v0
|
[
"task_categories:text-classification",
"size_categories:1K<n<10K",
"language:en",
"region:us"
] |
2023-04-13T18:58:50+00:00
|
{"language": ["en"], "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"], "pretty_name": "fomc-draft-v0"}
|
2023-05-02T14:44:52+00:00
|
c65962a80aadd7a2d90a9c2730ae4cb07e31b307
|
# Dataset Card for Instruct-Snippet-MLSUM-500
### Dataset Summary
This is a multitask instruction-finetuning dataset for the task of news snippet generation. It is built from a sample of ~500 news articles from the [MLSUM](https://huggingface.co/datasets/mlsum) dataset, augmented with machine-generated news snippets.
### Supported Tasks
This dataset was created to support the task of generating news snippets such as titles, teasers, keywords, SERP snippets, and tweets for news articles in German.
### Languages
de - German
## Dataset Structure
- label: a string feature.
- instruction: a string feature.
- input: a string feature.
- output: a string feature.
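A minimal loading sketch (assuming a `train` split; field names as listed above):
```python
from datasets import load_dataset

ds = load_dataset("snipaid/instruct-snippet-mlsum", split="train")
example = ds[0]
print(example["instruction"])
print(example["output"])
```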
## Dataset Creation
This dataset was created from Snippet-MLSUM-500. See [Snippet-MLSUM-500](https://huggingface.co/datasets/snipaid/snippet-mlsum-500) for the dataset without instructions.
Instructions were generated with GPT-3.5 from a human-curated seed-set of instructions.
## Considerations for Using the Data
### Known Limitations
Part of the snippet data is machine generated. Be aware that these features (specifically: output) may exhibit signs of model hallucination, toxicity and stereotypes.
## Additional Information
See [Instruct-Snippet-MLSUM-500-V2](https://huggingface.co/datasets/snipaid/instruct-snippet-mlsum-500-v2) if you are interested in an improved successor, with further support for summaries.
### Licensing Information
This dataset is licensed under MIT license.
|
snipaid/instruct-snippet-mlsum
|
[
"task_categories:summarization",
"task_categories:text2text-generation",
"size_categories:1K<n<10K",
"language:de",
"license:mit",
"news",
"headline generation",
"teaser generation",
"keyword generation",
"tweet generation",
"serp title-tag generation",
"serp meta-description generation",
"news snippet generation",
"region:us"
] |
2023-04-13T19:00:40+00:00
|
{"language": "de", "license": "mit", "size_categories": ["1K<n<10K"], "task_categories": ["summarization", "text2text-generation"], "pretty_name": "Instruct-Snippet-MLSUM-500", "tags": ["news", "headline generation", "teaser generation", "keyword generation", "tweet generation", "serp title-tag generation", "serp meta-description generation", "news snippet generation"]}
|
2023-04-19T17:21:38+00:00
|
53ae41cbd04f972231ab46e985df0f55c4284833
|
LauraRuis/tom_rlhf
|
[
"task_categories:text-generation",
"size_categories:10K<n<100K",
"license:mit",
"region:us"
] |
2023-04-13T19:01:03+00:00
|
{"license": "mit", "size_categories": ["10K<n<100K"], "task_categories": ["text-generation"], "pretty_name": "tom"}
|
2023-04-13T19:03:22+00:00
|
|
baf473a15dc34f53ae184ea76f8ad4505f0e75ab
|
# Dataset Card for "chunk_231"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
one-sec-cv12/chunk_231
|
[
"region:us"
] |
2023-04-13T19:12:56+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 23190981696.5, "num_examples": 241452}], "download_size": 21433081747, "dataset_size": 23190981696.5}}
|
2023-04-13T19:51:49+00:00
|
40689695f2f933f2de99d2262e1609c5e23dca36
|
# Dataset Card for "cova-coco-v2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
CreatlV/cova-coco-v2
|
[
"region:us"
] |
2023-04-13T19:17:26+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "image_id", "dtype": "string"}, {"name": "height", "dtype": "int64"}, {"name": "width", "dtype": "int64"}, {"name": "objects", "struct": [{"name": "bbox", "sequence": {"sequence": "float64"}}, {"name": "categories", "sequence": "int64"}]}, {"name": "annotations", "list": [{"name": "id", "dtype": "int64"}, {"name": "image_id", "dtype": "string"}, {"name": "category_id", "dtype": "int64"}, {"name": "segmentation", "sequence": "null"}, {"name": "area", "dtype": "float64"}, {"name": "bbox", "sequence": "float64"}, {"name": "iscrowd", "dtype": "int64"}, {"name": "attributes", "struct": [{"name": "occluded", "dtype": "bool"}, {"name": "rotation", "dtype": "float64"}]}]}, {"name": "pixel_values", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 4224301647.371447, "num_examples": 4531}, {"name": "val", "num_bytes": 1578402712.2047803, "num_examples": 1693}, {"name": "test", "num_bytes": 1413383645.4237726, "num_examples": 1516}], "download_size": 7207311109, "dataset_size": 7216088005.0}}
|
2023-04-13T19:42:16+00:00
|
fb6dd39746777f911777db2cea3e75b6757123a2
|
# Dataset Card for "tax-convos-sample2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mdacampora/tax-convos-sample2
|
[
"region:us"
] |
2023-04-13T19:21:46+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "turns", "list": [{"name": "role", "dtype": "string"}, {"name": "text", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 3823, "num_examples": 5}], "download_size": 4907, "dataset_size": 3823}}
|
2023-04-13T19:21:47+00:00
|
a46567b0210ffa8f932477338d30428ef9669f7e
|
# Dataset Card for "chunk_244"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
one-sec-cv12/chunk_244
|
[
"region:us"
] |
2023-04-13T19:57:20+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 17593208208.625, "num_examples": 183171}], "download_size": 15207813211, "dataset_size": 17593208208.625}}
|
2023-04-13T20:08:05+00:00
|
c22c28b7a513aa77bccdcd26a3b3397782bf8533
|
# Dataset Card for "chunk_236"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
one-sec-cv12/chunk_236
|
[
"region:us"
] |
2023-04-13T20:02:22+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 22843864224.25, "num_examples": 237838}], "download_size": 20332648847, "dataset_size": 22843864224.25}}
|
2023-04-13T20:17:11+00:00
|
c55811efdc03d5e55f3471c9ba0c3805fc682a0f
|
EarthnDusk/Slime_Tutorial_Beetlejuice_Lycoris
|
[
"license:creativeml-openrail-m",
"region:us"
] |
2023-04-13T20:03:07+00:00
|
{"license": "creativeml-openrail-m"}
|
2023-04-13T20:08:10+00:00
|
|
3cb18f77c69c17858dd8933cc02b52aae67688d1
|
# Dataset Card for "chunk_237"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
one-sec-cv12/chunk_237
|
[
"region:us"
] |
2023-04-13T20:03:31+00:00
|
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}], "splits": [{"name": "train", "num_bytes": 21874547808.75, "num_examples": 227746}], "download_size": 19436380894, "dataset_size": 21874547808.75}}
|
2023-04-13T20:17:49+00:00
|