Per-record fields in this dump, with minimum and maximum string/list lengths:

| Column | Type | Min | Max |
|---|---|---|---|
| sha | string | 40 | 40 |
| text | string | 1 | 13.4M |
| id | string | 2 | 117 |
| tags | list | 1 | 7.91k |
| created_at | string | 25 | 25 |
| metadata | string | 2 | 875k |
| last_modified | string | 25 | 25 |
| arxiv | list | 0 | 25 |
| languages | list | 0 | 7.91k |
| tags_str | string | 17 | 159k |
| text_str | string | 1 | 447k |
| text_lists | list | 0 | 352 |
| processed_texts | list | 1 | 353 |
| tokens_length | list | 1 | 353 |
| input_texts | list | 1 | 40 |
b12f9c74ed307fb3cbca59a5b4c6b92c56ee86d2
# Dataset Card for "dsum"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
thefluxapp/dsum
[ "region:us" ]
2023-11-06T20:41:31+00:00
{"dataset_info": {"features": [{"name": "dialogue", "dtype": "string"}, {"name": "summary", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 55615012.0, "num_examples": 54383}], "download_size": 32278282, "dataset_size": 55615012.0}}
2023-11-06T20:41:39+00:00
[]
[]
3794975f6b7465fa0e942945a63abc1cdf13a5dd
# Dataset Card for "xlsum_data-wiki_temario_results"

rouge = {'rouge1': 0.18060656653313187, 'rouge2': 0.050040266839074324, 'rougeL': 0.11199372465907172, 'rougeLsum': 0.11199372465907172}

bert = {'precision': 0.6379963795191735, 'recall': 0.7242612442737673, 'f1': 0.6780033153523967}

mover = 0.5537799442916987
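For reference, ROUGE and BERTScore numbers like those above can be computed with the `evaluate` library. The sketch below uses hypothetical prediction/reference pairs, assumes Portuguese (`lang="pt"`) based on the TeMário setting implied by the dataset name, and omits MoverScore, which has no standard `evaluate` module:

```python
import evaluate

# Hypothetical example pair; the card aggregates over the validation split.
predictions = ["o governo anunciou novas medidas economicas"]
references = ["governo anuncia pacote de medidas para a economia"]

# ROUGE scores (rouge1/rouge2/rougeL/rougeLsum), as reported in the card.
rouge = evaluate.load("rouge")
print(rouge.compute(predictions=predictions, references=references))

# BERTScore precision/recall/f1; lang="pt" is an assumption based on the
# Portuguese XL-Sum/TeMario setting suggested by the dataset name.
bertscore = evaluate.load("bertscore")
print(bertscore.compute(predictions=predictions, references=references, lang="pt"))
```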
arthurmluz/xlsum_data-wiki_temario_results
[ "region:us" ]
2023-11-06T20:42:19+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 32109288, "num_examples": 7175}], "download_size": 19872325, "dataset_size": 32109288}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-13T20:34:08+00:00
[]
[]
62fa5cb6cd29030754f17156a727833e1df7fc8b
# Dataset Card for "esc50-clap2023-results"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
renumics/esc50-clap2023-results
[ "region:us" ]
2023-11-06T20:45:26+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "text_embedding", "sequence": "float64"}, {"name": "prediction", "dtype": {"class_label": {"names": {"0": "dog", "1": "rooster", "2": "pig", "3": "cow", "4": "frog", "5": "cat", "6": "hen", "7": "insects", "8": "sheep", "9": "crow", "10": "rain", "11": "sea_waves", "12": "crackling_fire", "13": "crickets", "14": "chirping_birds", "15": "water_drops", "16": "wind", "17": "pouring_water", "18": "toilet_flush", "19": "thunderstorm", "20": "crying_baby", "21": "sneezing", "22": "clapping", "23": "breathing", "24": "coughing", "25": "footsteps", "26": "laughing", "27": "brushing_teeth", "28": "snoring", "29": "drinking_sipping", "30": "door_wood_knock", "31": "mouse_click", "32": "keyboard_typing", "33": "door_wood_creaks", "34": "can_opening", "35": "washing_machine", "36": "vacuum_cleaner", "37": "clock_alarm", "38": "clock_tick", "39": "glass_breaking", "40": "helicopter", "41": "chainsaw", "42": "siren", "43": "car_horn", "44": "engine", "45": "train", "46": "church_bells", "47": "airplane", "48": "fireworks", "49": "hand_saw"}}}}, {"name": "entropy", "dtype": "float32"}, {"name": "audio_embedding", "sequence": "float32"}, {"name": "pred_incorrect", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 24616250, "num_examples": 2000}], "download_size": 13885284, "dataset_size": 24616250}}
2023-11-06T20:45:40+00:00
[]
[]
c03c0470b11cbc2ec4ca461b2354e8350bdd159f
# Dataset Card for "medicationqa_train"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
hippocrates/medicationqa_train
[ "region:us" ]
2023-11-06T20:55:56+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "conversations", "list": [{"name": "from", "dtype": "string"}, {"name": "value", "dtype": "string"}]}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 443458, "num_examples": 690}], "download_size": 206863, "dataset_size": 443458}}
2023-11-06T20:55:57+00:00
[]
[]
a33e0d3478bfe3bc38b31809b4e0db60291c0c35
# Dataset Card for "liveqa_train"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
hippocrates/liveqa_train
[ "region:us" ]
2023-11-06T21:00:15+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "conversations", "list": [{"name": "from", "dtype": "string"}, {"name": "value", "dtype": "string"}]}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1155384, "num_examples": 635}], "download_size": 506374, "dataset_size": 1155384}}
2023-11-06T21:00:16+00:00
[]
[]
7af1f65cfda81ec8b2772280e7636e277a8633da
# Dataset Card for "adoro_cinema_filmes"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
celsowm/adoro_cinema_filmes
[ "region:us" ]
2023-11-06T21:27:31+00:00
{"dataset_info": {"features": [{"name": "titulo", "dtype": "string"}, {"name": "sinopse", "dtype": "string"}, {"name": "generos", "sequence": "string"}, {"name": "link", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 23369140, "num_examples": 42918}], "download_size": 13807632, "dataset_size": 23369140}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-11T11:08:24+00:00
[]
[]
76811f5de053f720b7c216f0468bff0b691c4453
---
license: mit
---

# Dataset Card

**Developed by:** [More Information Needed]

**Shared by [optional]:** [More Information Needed]

**Dataset type:** [More Information Needed]

**Language(s) (NLP):** [More Information Needed]

**License:** [More Information Needed]

**Derived from dataset [optional]:** [More Information Needed]

**Dataset Sources [optional]**

<!-- Provide the basic links for the dataset. -->

**Repository:** [More Information Needed]

**Paper [optional]:** [More Information Needed]

**Demo [optional]:** [More Information Needed]

**Uses**

<!-- Address questions around how the dataset is intended to be used, including the foreseeable users of the dataset and those affected by the dataset. -->

**Direct Use**

<!-- This section is for the dataset use without modification or integration into a larger system. -->

[More Information Needed]

**Downstream Use [optional]**

<!-- This section is for the dataset use when integrated or modified for a task, or when plugged into a larger ecosystem/app. -->

[More Information Needed]

**Out-of-Scope Use**

<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->

[More Information Needed]

**Bias, Risks, and Limitations**

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

**Recommendations**

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.

**How to Get Started with the Dataset**

Use the code below to get started with the dataset.

[More Information Needed]

**Collection Details**

**Source Data**

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the source data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

**Collection Procedures**

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the collection procedure. -->

**Preprocessing [optional]**

[More Information Needed]

**Collection Hyperparameters**

Collection regime: [More Information Needed]

<!-- Details about the data collection process -->

**Speeds, Sizes, Times [optional]**

<!-- This section provides information about data size, collection start/end time, etc. -->

[More Information Needed]

**Evaluation**

<!-- This section describes the evaluation protocols and provides the results. -->

**Testing Data, Factors & Metrics**

**Testing Data**

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

**Factors**

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

**Metrics**

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

**Results**

[More Information Needed]

**Summary**

**Dataset Examination [optional]**

<!-- Relevant analysis work for the dataset goes here -->

[More Information Needed]

**Environmental Impact**

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

**Hardware Type:** [More Information Needed]

**Hours used:** [More Information Needed]

**Cloud Provider:** [More Information Needed]

**Compute Region:** [More Information Needed]

**Carbon Emitted:** [More Information Needed]

**Technical Specifications [optional]**

**Dataset Structure and Objective**

[More Information Needed]

**Compute Infrastructure**

[More Information Needed]

**Hardware**

[More Information Needed]

**Software**

[More Information Needed]

**Citation [optional]**

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

**Glossary [optional]**

<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->

[More Information Needed]

**More Information [optional]**

[More Information Needed]

**Dataset Card Authors [optional]**

[More Information Needed]

**Dataset Card Contact**

[More Information Needed]
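The "How to Get Started with the Dataset" section above is still a placeholder. As a non-authoritative sketch, loading would presumably follow the standard `datasets` pattern, assuming the repo contains data files that `load_dataset` can auto-detect (the empty metadata does not confirm this):

```python
from datasets import load_dataset

# Hedged sketch only: the card does not yet document configs or splits,
# so this assumes load_dataset can auto-detect data files in the repo.
ds = load_dataset("Taylor658/mqa1")
print(ds)
```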
Taylor658/mqa1
[ "region:us" ]
2023-11-06T21:34:18+00:00
{}
2024-02-01T02:04:02+00:00
[]
[]
e8d0ce0ace880460cd85f8b05395ac02435d67ea
---
license: mit
---

Currently under heavy development.
laker-julius-misha/correlated-errors
[ "region:us" ]
2023-11-06T22:08:16+00:00
{}
2024-02-14T02:03:52+00:00
[]
[]
1cb3dfc5bb98d81fe56baf4ef49cc03b578bfcb7
[HackerNoon](https://hackernoon.com) curated the internet's most-cited 7M+ tech company news articles and blog posts about the 3k+ most valuable tech companies in 2022 and 2023. These stories were curated to power [HackerNoon.com/Companies](https://hackernoon.com/companies), where we update daily news on top technology companies like [Microsoft](https://hackernoon.com/company/microsoft), [Google](https://hackernoon.com/company/google), and [HuggingFace](https://hackernoon.com/company/huggingface). Please use this news data freely for your project, and, as always, anyone is welcome to [publish on HackerNoon](https://hackernoon.com/p/publish).
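Given the 1M–10M size category tagged on this dataset, streaming is a reasonable way to sample the dump without downloading it in full. A minimal sketch; the `train` split name is an assumption, and since the card does not document column names, the example just prints whatever fields the repo exposes:

```python
from datasets import load_dataset

# Stream the 7M+ article dump instead of materializing it locally.
# The "train" split name is an assumption; the card does not document splits.
ds = load_dataset("HackerNoon/tech-company-news-data-dump", split="train", streaming=True)

# Peek at the first record to discover the available fields.
first = next(iter(ds))
print(sorted(first.keys()))
```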
HackerNoon/tech-company-news-data-dump
[ "task_categories:text-classification", "task_categories:summarization", "size_categories:1M<n<10M", "language:en", "license:mit", "news", "technology news", "company news", "tech company news", "tech news", "technology company news", "tech company blogs", "technology company blogs", "hackernoon", "hacker noon", "news curation", "tech news curation", "tech company news curation", "technology company news curation", "tech blog curation", "technology blog curation", "brave search api", "bing news api", "hackernoon api", "hacker noon api", "tech company news api", "technology company news api", "region:us" ]
2023-11-06T22:22:33+00:00
{"language": ["en"], "license": "mit", "size_categories": ["1M<n<10M"], "task_categories": ["text-classification", "summarization"], "tags": ["news", "technology news", "company news", "tech company news", "tech news", "technology company news", "tech company blogs", "technology company blogs", "hackernoon", "hacker noon", "news curation", "tech news curation", "tech company news curation", "technology company news curation", "tech blog curation", "technology blog curation", "brave search api", "bing news api", "hackernoon api", "hacker noon api", "tech company news api", "technology company news api"]}
2024-02-06T01:50:13+00:00
[]
[ "en" ]
bbfb250446884b38da2c961ea3f45cf07c027330
# Dataset Card for "merged-no-pad-text-32768"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
shossain/merged-no-pad-text-32768
[ "region:us" ]
2023-11-06T22:33:54+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 372426073, "num_examples": 3036}], "download_size": 180967260, "dataset_size": 372426073}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-06T22:34:21+00:00
[]
[]
bb54e67caa73139b20f9235db54c7fa8c8c5283a
# Dataset Card for "6f9ae809"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
result-kand2-sdxl-wuerst-karlo/6f9ae809
[ "region:us" ]
2023-11-06T22:35:39+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 170, "num_examples": 10}], "download_size": 1327, "dataset_size": 170}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-06T22:35:41+00:00
[]
[]
460186de8efac0863fcb7914e3d97e08a846ea25
# Dataset Card for "e82d3dfd"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
result-kand2-sdxl-wuerst-karlo/e82d3dfd
[ "region:us" ]
2023-11-06T22:35:42+00:00
{"dataset_info": {"features": [{"name": "result", "dtype": "string"}, {"name": "id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 170, "num_examples": 10}], "download_size": 1327, "dataset_size": 170}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-06T22:35:43+00:00
[]
[]
b4b05a6a80beafdbcbb44841fe42742e2bda5518
# Dataset Card for "merged-no-pad-32768"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
shossain/merged-no-pad-32768
[ "region:us" ]
2023-11-06T22:40:47+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 1226023649, "num_examples": 3036}], "download_size": 337654761, "dataset_size": 1226023649}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-06T22:41:30+00:00
[]
[]
23aa0d33f253c92358a53822e0deb8fb42356950
RAG-generated user interactions with GPT-3.5.
SebastianBodza/CodingConversations
[ "language:en", "region:us" ]
2023-11-06T22:45:24+00:00
{"language": ["en"]}
2023-11-16T23:01:52+00:00
[]
[ "en" ]
2d358b07693cae016e4ca465871c4f6f5eb33d7b
# Dataset Card for Dataset Name

<!-- Provide a quick summary of the dataset. -->

This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1).

## Dataset Details

### Dataset Description

<!-- Provide a longer summary of what this dataset is. -->

- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]

### Dataset Sources [optional]

<!-- Provide the basic links for the dataset. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the dataset is intended to be used. -->

### Direct Use

<!-- This section describes suitable use cases for the dataset. -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->

[More Information Needed]

## Dataset Structure

<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->

[More Information Needed]

## Dataset Creation

### Curation Rationale

<!-- Motivation for the creation of this dataset. -->

[More Information Needed]

### Source Data

<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->

#### Data Collection and Processing

<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->

[More Information Needed]

#### Who are the source data producers?

<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->

[More Information Needed]

### Annotations [optional]

<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->

#### Annotation process

<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->

[More Information Needed]

#### Who are the annotators?

<!-- This section describes the people or systems who created the annotations. -->

[More Information Needed]

#### Personal and Sensitive Information

<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.

## Citation [optional]

<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Dataset Card Authors [optional]

[More Information Needed]

## Dataset Card Contact

[More Information Needed]
epptt/erukaLabels
[ "region:us" ]
2023-11-06T23:07:46+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "train.json"}]}]}
2023-11-16T05:51:08+00:00
[]
[]
b10a664468f38f9444561fc82c4ab1fbc5f0f081
# Dataset Card for "test1"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
bys2058/test1
[ "region:us" ]
2023-11-06T23:07:47+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "image_caption", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 11228186.0, "num_examples": 25}], "download_size": 11224331, "dataset_size": 11228186.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-06T23:16:09+00:00
[]
[]
8f17730a9a812299acf8e13423b9c2761f529ffb
# Dataset Card for "ceval_all_dev"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
liyucheng/ceval_all_dev
[ "region:us" ]
2023-11-06T23:15:23+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int32"}, {"name": "question", "dtype": "string"}, {"name": "A", "dtype": "string"}, {"name": "B", "dtype": "string"}, {"name": "C", "dtype": "string"}, {"name": "D", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "explanation", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 167293, "num_examples": 260}, {"name": "dev", "num_bytes": 167293, "num_examples": 260}], "download_size": 238168, "dataset_size": 334586}}
2023-11-06T23:15:33+00:00
[]
[]
45bd737e179390374150f35384e04841c965dd1a
# Dataset Card for "test2"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
bys2058/test2
[ "region:us" ]
2023-11-06T23:18:28+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "image_caption", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 632594898.5, "num_examples": 1292}], "download_size": 632383145, "dataset_size": 632594898.5}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-06T23:57:04+00:00
[]
[]
301acc8f3f366ee8fbe3ed2b737a2011378c86f5
# Dataset Card for "mmlu_train"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
liyucheng/mmlu_train
[ "region:us" ]
2023-11-06T23:25:44+00:00
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "subject", "dtype": "string"}, {"name": "choices", "sequence": "string"}, {"name": "answer", "dtype": {"class_label": {"names": {"0": "A", "1": "B", "2": "C", "3": "D"}}}}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 162487145, "num_examples": 99842}], "download_size": 48165566, "dataset_size": 162487145}}
2023-11-06T23:26:23+00:00
[]
[]
5273fb6c5af33a733a2478f5089eafc659707d08
# Dataset Card for "zephyrJavaCucumber"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
centroIA/zephyrJavaCucumber
[ "region:us" ]
2023-11-06T23:50:46+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "string"}, {"name": "__index_level_1__", "dtype": "string"}, {"name": "__index_level_2__", "dtype": "string"}, {"name": "__index_level_3__", "dtype": "string"}, {"name": "__index_level_4__", "dtype": "string"}, {"name": "__index_level_5__", "dtype": "string"}, {"name": "__index_level_6__", "dtype": "string"}, {"name": "__index_level_7__", "dtype": "string"}, {"name": "__index_level_8__", "dtype": "string"}, {"name": "__index_level_9__", "dtype": "string"}, {"name": "__index_level_10__", "dtype": "string"}, {"name": "__index_level_11__", "dtype": "string"}, {"name": "__index_level_12__", "dtype": "string"}, {"name": "__index_level_13__", "dtype": "string"}, {"name": "__index_level_14__", "dtype": "string"}, {"name": "__index_level_15__", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1137504, "num_examples": 165}], "download_size": 318943, "dataset_size": 1137504}}
2023-11-06T23:50:48+00:00
[]
[]
00a2a7027d71128cf1612a57f06d6cf83fabefac
Dataset built with the bert-cased tokenizer, with a cutoff at 512 tokens. Original dataset: https://huggingface.co/datasets/bookcorpus
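A hedged sketch of how such a dataset could be produced: tokenize BookCorpus with the cased BERT tokenizer and truncate at 512 tokens, which yields exactly the `input_ids`/`token_type_ids`/`attention_mask` columns listed in this record's metadata. The `map` parameters below are assumptions, not the author's exact script:

```python
from datasets import load_dataset
from transformers import AutoTokenizer

# Assumption: "bert-cased" means the standard bert-base-cased checkpoint.
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")

books = load_dataset("bookcorpus", split="train")

def tokenize(batch):
    # Truncating at 512 tokens matches the cutoff described above and yields
    # the input_ids / token_type_ids / attention_mask columns of this dataset.
    return tokenizer(batch["text"], truncation=True, max_length=512)

tokenized = books.map(tokenize, batched=True, remove_columns=["text"])
```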
gmongaras/book_BERT_512
[ "region:us" ]
2023-11-07T00:22:43+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "token_type_ids", "sequence": "int8"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 228229039152, "num_examples": 74004228}], "download_size": 2826157131, "dataset_size": 228229039152}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-07T07:08:15+00:00
[]
[]
1b289e670bd4f3aa0bae0171459e342d9dffae57
# Dataset Card for "zephyrJavaCucumberv2"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
centroIA/zephyrJavaCucumberv2
[ "region:us" ]
2023-11-07T00:29:20+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1128286, "num_examples": 165}], "download_size": 269397, "dataset_size": 1128286}}
2023-11-07T00:29:22+00:00
[]
[]
7c54c745c376b16cc10db06ec54466efa40b57b5
# Dataset Card for "autotrain-data-Nuclear_Fusion_Falcon"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
pseudolab/autotrain-data-Nuclear_Fusion_Falcon
[ "region:us" ]
2023-11-07T01:09:55+00:00
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "Magnetic Field Fluctuations", "dtype": "float64"}, {"name": "Leakage", "dtype": "float64"}, {"name": "Instabilities", "dtype": "float64"}, {"name": "Plasma Instabilities", "dtype": "float64"}, {"name": "Magnetic Field Strength", "dtype": "float64"}, {"name": "Injection Energy", "dtype": "float64"}, {"name": "Beam Symmetry", "dtype": "float64"}, {"name": "Target Density", "dtype": "float64"}, {"name": "Target Composition", "dtype": "string"}, {"name": "Fuel Density", "dtype": "float64"}, {"name": "Temperature", "dtype": "float64"}, {"name": "Confinement Time", "dtype": "float64"}, {"name": "Fuel Purity", "dtype": "float64"}, {"name": "Energy Input", "dtype": "float64"}, {"name": "Power Output", "dtype": "float64"}, {"name": "Pressure", "dtype": "float64"}, {"name": "Neutron Yield", "dtype": "float64"}, {"name": "Ignition", "dtype": "int64"}, {"name": "autotrain_text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 17566788, "num_examples": 100000}, {"name": "validation", "num_bytes": 17566788, "num_examples": 100000}], "download_size": 32112642, "dataset_size": 35133576}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}]}
2023-11-07T01:09:57+00:00
[]
[]
438b2ab6754aad7f61e251c5814d9b4106a2416b
This dataset is an unofficial split of [FEMNIST](https://github.com/TalwalkarLab/leaf/tree/master/data/femnist). Researchers can generate the data with the following command line:
```
./preprocess.sh -s niid --iu 0.1 --sf 0.1 -k 64 -t sample --tf 0.9 --smplseed 1 --spltseed 1
```
The dataset was converted back to .png images from the generated JSON files to better match the Hugging Face style.
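The JSON-to-PNG conversion mentioned above can be sketched as follows, assuming the LEAF output layout (a `user_data` dict whose `x` entries are flattened 28×28 grayscale images with pixel values in [0, 1]); the input file name is illustrative, not the repo's actual shard name:

```python
import json
from pathlib import Path

import numpy as np
from PIL import Image

# Illustrative file name; preprocess.sh writes JSON shards like this one.
data = json.loads(Path("all_data_0_niid_1_keep_64_train_9.json").read_text())

out_dir = Path("femnist_png")
out_dir.mkdir(exist_ok=True)

for user, samples in data["user_data"].items():
    for i, (pixels, label) in enumerate(zip(samples["x"], samples["y"])):
        # LEAF stores each image as 784 floats in [0, 1]; rescale to 0-255
        # and reshape to 28x28 before saving as a grayscale .png.
        arr = (np.array(pixels, dtype=np.float32).reshape(28, 28) * 255).astype(np.uint8)
        Image.fromarray(arr, mode="L").save(out_dir / f"{user}_{i}_label{label}.png")
```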
AnsonZhang/FEMNIST-SMALL-C
[ "license:bsd-2-clause", "region:us" ]
2023-11-07T01:23:31+00:00
{"license": "bsd-2-clause"}
2023-11-07T23:57:45+00:00
[]
[]
1ddf7250f0e4df2cf6e5c6aa46320b0c2664b198
# Dataset Card for "generad-dataset"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
generadaidemo/generad-dataset
[ "region:us" ]
2023-11-07T01:33:45+00:00
{"dataset_info": {"features": [{"name": "item", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "ad", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 947, "num_examples": 5}], "download_size": 3380, "dataset_size": 947}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-07T01:33:47+00:00
[]
[]
dcff6c34d703a9da669c2c15c1bbc4f7ae7bcad1
# Dataset Card for "color_spec_cls"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
arieg/color_spec_cls
[ "region:us" ]
2023-11-07T01:35:58+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "10", "1": "140", "2": "141", "3": "190", "4": "193", "5": "194", "6": "197", "7": "2", "8": "200", "9": "5"}}}}], "splits": [{"name": "train", "num_bytes": 10354796.0, "num_examples": 100}], "download_size": 10356873, "dataset_size": 10354796.0}}
2023-11-07T01:36:00+00:00
[]
[]
ef58136f227b20bd3c233b84965e7e14f9d85b45
# Dataset Card for "bigcode-pii-pjj_checks"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Vincentnien/bigcode-pii-pjj_checks
[ "region:us" ]
2023-11-07T01:48:36+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "repo_id", "dtype": "string"}, {"name": "file_path", "dtype": "string"}, {"name": "content", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}, {"name": "index", "dtype": "int64"}, {"name": "secrets", "dtype": "string"}, {"name": "has_secrets", "dtype": "bool"}, {"name": "number_secrets", "dtype": "int64"}, {"name": "new_content", "dtype": "string"}, {"name": "modified", "dtype": "bool"}, {"name": "references", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 498783.18073461443, "num_examples": 46}], "download_size": 0, "dataset_size": 498783.18073461443}}
2023-11-07T01:56:09+00:00
[]
[]
f987859c3814aa8f8d0da4050ad596e7e8ae93e8
# Dataset Card for "cool_new_dataset"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
lf-mlteam/cool_new_dataset
[ "region:us" ]
2023-11-07T02:06:27+00:00
{"dataset_info": {"features": [{"name": "name", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "ad", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3926, "num_examples": 5}], "download_size": 8782, "dataset_size": 3926}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-07T02:06:28+00:00
[]
[]
c25ba4a28d8b7c69dfd95341b1d3bf8f8f99cc50
This training and validation dataset combines the Media Frame Corpus and the Philippine Frame Corpus, labeled using the Policy Issue Frames Codebook, with an 80-20 train-test split. The code_frames column contains annotations following the Policy Issue Frames Codebook (codes 1-15) on which at least two (2) annotators agreed. The text column contains sentences/phrases from online news articles. The label column is the 0th entry of code_frames and is used for training.
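Since the label column is defined as the 0th entry of code_frames, it can be reproduced with a one-line `map`. A minimal sketch, assuming the repo loads with `load_dataset` and exposes a `train` split with the column names described above:

```python
from datasets import load_dataset

# Sketch: rebuild the training label from the annotation list, as described
# above (label = 0th entry of code_frames). The split name is an assumption.
ds = load_dataset("jmLuis/MediaFrameCorpus-PhilippineFrameCorpus-Combined", split="train")
ds = ds.map(lambda ex: {"label": ex["code_frames"][0]})
```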
jmLuis/MediaFrameCorpus-PhilippineFrameCorpus-Combined
[ "task_categories:text-classification", "size_categories:10K<n<100K", "language:en", "region:us" ]
2023-11-07T02:11:31+00:00
{"language": ["en"], "size_categories": ["10K<n<100K"], "task_categories": ["text-classification"], "pretty_name": "MFC+PFC"}
2023-11-07T02:24:15+00:00
[]
[ "en" ]
cfc6efec815340b458dc331c979ad3ccb80c6b94
# Dataset Card for "clothing_samples"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sparkyfina/clothing_samples
[ "region:us" ]
2023-11-07T02:12:47+00:00
{"dataset_info": {"features": [{"name": "name", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "ad", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 7237, "num_examples": 5}], "download_size": 15054, "dataset_size": 7237}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-07T02:12:49+00:00
[]
[]
592270bf75194edbe726561cc8102de94ff0f2fa
# Dataset Card for "testy"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
andreweduffy/testy
[ "region:us" ]
2023-11-07T02:14:53+00:00
{"dataset_info": {"features": [{"name": "filename", "dtype": "string"}, {"name": "document_id", "dtype": "string"}, {"name": "passage_id", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 11428076, "num_examples": 6578}], "download_size": 5198408, "dataset_size": 11428076}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-07T02:44:19+00:00
[]
[]
861de994099a712931fa67db2d8df3a73b4345af
# Dataset Card for "webglm-qa" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
vietgpt/webglm-qa
[ "region:us" ]
2023-11-07T02:18:48+00:00
{"dataset_info": {"features": [{"name": "messages", "list": [{"name": "content", "dtype": "string"}, {"name": "role", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 142583680, "num_examples": 43579}], "download_size": 60619763, "dataset_size": 142583680}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-07T02:19:12+00:00
[]
[]
TAGS #region-us
# Dataset Card for "webglm-qa" More Information needed
[ "# Dataset Card for \"webglm-qa\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"webglm-qa\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"webglm-qa\"\n\nMore Information needed" ]
ed000db0d474e6f9800680dbd8441512efbb64e0
# KoRAE Dataset <p align="center"><img src="https://cdn-uploads.huggingface.co/production/uploads/63e087b6a98d931aa90c1b9c/VVHlw268vUEdRAzUtjDhE.png", width=256, height=256></p> We used a filtered, high-quality Korean dataset for the finetuning of KoRAE. First, we gathered Korean data and combined them into a single mixture. Then we filtered high-quality data from this combination using the filtering method introduced in [AlpaGasus](https://arxiv.org/abs/2307.08701). The overview of the data processing procedure is as follows: 1. Collect various Korean datasets from the HuggingFace Hub. 2. Rate the data quality using `gpt-3.5-turbo`. 3. Process the rated data and filter the high-scored data. Let's go deeper into the data processing! ### 1. Korean dataset mixture We investigated several sources to collect high-quality Korean data and gathered data from the various sources. As a result, we were able to create a new dataset containing 64K pieces of data. The specific configuration of the dataset is as follows: |Dataset|# Nums| |---|---| |**[OpenOrca-ko](https://huggingface.co/datasets/kyujinpy/OpenOrca-KO)**|21.6k| |**[KOpen-Platypus](https://huggingface.co/datasets/kyujinpy/KOpen-platypus)**|24.9k| |**[KoCoT_2000](https://huggingface.co/datasets/kyujinpy/KoCoT_2000)**|2.1k| |**[databricks-dolly-15k-ko](https://huggingface.co/datasets/nlpai-lab/databricks-dolly-15k-ko)**|15k| |**Total**|63.7k| You can check the original KoRAE dataset here: [KoRAE_original](https://huggingface.co/datasets/Cartinoe5930/KoRAE_original) ### 2. Rating We utilized ChatGPT (gpt-3.5-turbo) as a rater to rate the quality of the dataset. We considered whether to use a Korean or an English prompt for the evaluation, but we thought it would be undesirable to give evaluations in different languages, so we conducted the evaluation using the Korean prompt. The overall data rating method follows [AlpaGasus](https://arxiv.org/abs/2307.08701). ### 3. Processing & Filtering We postprocessed the rated dataset after the rating. The main postprocessing procedures are as follows: - Correction of wrongly extracted scores - Exclusion of incorrectly formatted data You can check the postprocessed KoRAE dataset here: [KoRAE_rated_filtered](https://huggingface.co/datasets/Cartinoe5930/KoRAE_rated_filtered) After all the postprocessing, we analysed the score distribution of the rated dataset. As shown in the following figure, 8-point data turned out to be the most common. This confirms that the KoRAE dataset consisted of high-quality data from the beginning. ![rated_dataset_distribution.png](https://cdn-uploads.huggingface.co/production/uploads/63e087b6a98d931aa90c1b9c/xeZmIDtINKgTV2wxtvuOs.png) However, we filtered only the data with a score of 8.5 or higher and used it to finetune KoRAE for better performance. As a result, we were able to filter the dataset from 64k down to 12k! This 'KoRAE_filtered_12k' is the result of all the previous processes. ## Github Repository For more specific information, please check the following [Repository](https://github.com/gauss5930/KoRAE) ## Citation Thanks to [@kyujinpy](https://huggingface.co/kyujinpy) and [@nlp-ai](https://huggingface.co/nlpai-lab) for providing Korean datasets. 
- [KO-Platypus](https://github.com/Marker-Inc-Korea/KO-Platypus) - [Korean-OpenOrca](https://github.com/Marker-Inc-Korea/Korean-OpenOrca) ``` @inproceedings{lee2023kullm, title={KULLM: Learning to Construct Korean Instruction-following Large Language Models}, author={Lee, SeungJun and Lee, Taemin and Lee, Jeongwoo and Jang, Yoona and Lim, Heuiseok}, booktitle={Annual Conference on Human and Language Technology}, pages={196--202}, year={2023}, organization={Human and Language Technology} } ``` ``` @misc{chen2023alpagasus, title={AlpaGasus: Training A Better Alpaca with Fewer Data}, author={Lichang Chen and Shiyang Li and Jun Yan and Hai Wang and Kalpa Gunaratna and Vikas Yadav and Zheng Tang and Vijay Srinivasan and Tianyi Zhou and Heng Huang and Hongxia Jin}, year={2023}, eprint={2307.08701}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
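A hedged sketch (not the authors' actual script) of the final 8.5-score cutoff described in the card above, using the `datasets` library; it assumes the rated dataset exposes the same `score` column as this filtered one and a `train` split:

```python
from datasets import load_dataset

# Load the postprocessed, rated dataset referenced in the card
# (split name "train" is an assumption).
rated = load_dataset("Cartinoe5930/KoRAE_rated_filtered", split="train")

# Keep only examples that gpt-3.5-turbo rated 8.5 or higher.
filtered = rated.filter(lambda ex: ex["score"] >= 8.5)

print(len(rated), "->", len(filtered))  # roughly 64k -> 12k per the card
```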
Cartinoe5930/KoRAE_filtered_12k
[ "arxiv:2307.08701", "region:us" ]
2023-11-07T02:19:29+00:00
{"dataset_info": {"features": [{"name": "source", "dtype": "string"}, {"name": "prompt", "dtype": "string"}, {"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}, {"name": "review", "dtype": "string"}, {"name": "score", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 35802556, "num_examples": 12473}], "download_size": 18374150, "dataset_size": 35802556}}
2023-11-09T13:40:09+00:00
[ "2307.08701" ]
[]
TAGS #arxiv-2307.08701 #region-us
KoRAE Dataset ============= <img src="URL width=256, height=256> We used a filtered, high-quality Korean dataset for the finetuning of KoRAE. First, we gathered Korean data and combined them into a single mixture. Then we filtered high-quality data from this combination using the filtering method introduced in AlpaGasus. The overview of the data processing procedure is as follows: 1. Collect various Korean datasets from the HuggingFace Hub. 2. Rate the data quality using 'gpt-3.5-turbo'. 3. Process the rated data and filter the high-scored data. Let's go deeper into the data processing! ### 1. Korean dataset mixture We investigated several sources to collect high-quality Korean data and gathered data from the various sources. As a result, we were able to create a new dataset containing 64K pieces of data. The specific configuration of the dataset is as follows: You can check the original KoRAE dataset here: KoRAE\_original ### 2. Rating We utilized ChatGPT (gpt-3.5-turbo) as a rater to rate the quality of the dataset. We considered whether to use a Korean or an English prompt for the evaluation, but we thought it would be undesirable to give evaluations in different languages, so we conducted the evaluation using the Korean prompt. The overall data rating method follows AlpaGasus ### 3. Processing & Filtering We postprocessed the rated dataset after the rating. The main postprocessing procedures are as follows: * Correction of wrongly extracted scores * Exclusion of incorrectly formatted data You can check the postprocessed KoRAE dataset here: KoRAE\_rated\_filtered After all the postprocessing, we analysed the score distribution of the rated dataset. As shown in the following figure, 8-point data turned out to be the most common. This confirms that the KoRAE dataset consisted of high-quality data from the beginning. !rated\_dataset\_distribution.png However, we filtered only the data with a score of 8.5 or higher and used it to finetune KoRAE for better performance. As a result, we were able to filter the dataset from 64k down to 12k! This 'KoRAE\_filtered\_12k' is the result of all the previous processes. Github Repository ----------------- For more specific information, please check the following Repository Thanks to @kyujinpy and @nlp-ai for providing Korean datasets. * KO-Platypus * Korean-OpenOrca
[ "### 1. Korean dataset mixture\n\n\nWe investigated several sources to collect high-quality Korean data, and among them, we collected data from the various sources.\nAs a result, we were able to create a new dataset containing 64K pieces of data.\nThe specific configuration of the dataset is as follows:\n\n\n\nYou can check the original KoRAE dataset here: KoRAE\\_original", "### 2. Rating\n\n\nWe utilized ChatGPT(gpt-3.5-turbo) as rater to rate the quality of dataset.\nWe considered whether to use the prompt for the evaluation in Korean or English, but we thought it would be undesirable to give evaluations in different languages, so we conducted the evaluation using the Korean prompt.\nThe overall data rating method was reffered to AlpaGasus", "### 3. Processing & Filtering\n\n\nWe postprocessed rated dataset after the rating.\nThe main postprocessing procedure are as follows:\n\n\n* Wrong score extraction correction\n* Incorrect format dataset exclusion\n\n\nYou can check the postprocessed KoRAE dataset here: KoRAE\\_rated\\_filtered\n\n\nAfter the all postprocessing, we analysed the score distribution of rated dataset.\nAs shown in the following figure, it was confirmed that 8-point data was the most.\nThis confirms that KoRAE dataset consisted of high-quality data from the beginning.\n\n\n!rated\\_dataset\\_distribution.png\n\n\nHowever, We filtered data only with a score of 8.5 or higher and used it to finetune KoRAE for better performance.\nAs a result, we were able to filter the dataset 64k to 12k!\nThis 'KoRAE\\_filtered\\_12k' is the result of all previous processes.\n\n\nGithub Repository\n-----------------\n\n\nFor the more specific information, please check the following Repository\n\n\nThanks to @kyujinpy and @nlp-ai for providing Korean datasets.\n\n\n* KO-Platypus\n* Korean-OpenOrca" ]
[ "TAGS\n#arxiv-2307.08701 #region-us \n", "### 1. Korean dataset mixture\n\n\nWe investigated several sources to collect high-quality Korean data, and among them, we collected data from the various sources.\nAs a result, we were able to create a new dataset containing 64K pieces of data.\nThe specific configuration of the dataset is as follows:\n\n\n\nYou can check the original KoRAE dataset here: KoRAE\\_original", "### 2. Rating\n\n\nWe utilized ChatGPT(gpt-3.5-turbo) as rater to rate the quality of dataset.\nWe considered whether to use the prompt for the evaluation in Korean or English, but we thought it would be undesirable to give evaluations in different languages, so we conducted the evaluation using the Korean prompt.\nThe overall data rating method was reffered to AlpaGasus", "### 3. Processing & Filtering\n\n\nWe postprocessed rated dataset after the rating.\nThe main postprocessing procedure are as follows:\n\n\n* Wrong score extraction correction\n* Incorrect format dataset exclusion\n\n\nYou can check the postprocessed KoRAE dataset here: KoRAE\\_rated\\_filtered\n\n\nAfter the all postprocessing, we analysed the score distribution of rated dataset.\nAs shown in the following figure, it was confirmed that 8-point data was the most.\nThis confirms that KoRAE dataset consisted of high-quality data from the beginning.\n\n\n!rated\\_dataset\\_distribution.png\n\n\nHowever, We filtered data only with a score of 8.5 or higher and used it to finetune KoRAE for better performance.\nAs a result, we were able to filter the dataset 64k to 12k!\nThis 'KoRAE\\_filtered\\_12k' is the result of all previous processes.\n\n\nGithub Repository\n-----------------\n\n\nFor the more specific information, please check the following Repository\n\n\nThanks to @kyujinpy and @nlp-ai for providing Korean datasets.\n\n\n* KO-Platypus\n* Korean-OpenOrca" ]
[ 14, 85, 88, 266 ]
[ "passage: TAGS\n#arxiv-2307.08701 #region-us \n### 1. Korean dataset mixture\n\n\nWe investigated several sources to collect high-quality Korean data, and among them, we collected data from the various sources.\nAs a result, we were able to create a new dataset containing 64K pieces of data.\nThe specific configuration of the dataset is as follows:\n\n\n\nYou can check the original KoRAE dataset here: KoRAE\\_original### 2. Rating\n\n\nWe utilized ChatGPT(gpt-3.5-turbo) as rater to rate the quality of dataset.\nWe considered whether to use the prompt for the evaluation in Korean or English, but we thought it would be undesirable to give evaluations in different languages, so we conducted the evaluation using the Korean prompt.\nThe overall data rating method was reffered to AlpaGasus### 3. Processing & Filtering\n\n\nWe postprocessed rated dataset after the rating.\nThe main postprocessing procedure are as follows:\n\n\n* Wrong score extraction correction\n* Incorrect format dataset exclusion\n\n\nYou can check the postprocessed KoRAE dataset here: KoRAE\\_rated\\_filtered\n\n\nAfter the all postprocessing, we analysed the score distribution of rated dataset.\nAs shown in the following figure, it was confirmed that 8-point data was the most.\nThis confirms that KoRAE dataset consisted of high-quality data from the beginning.\n\n\n!rated\\_dataset\\_distribution.png\n\n\nHowever, We filtered data only with a score of 8.5 or higher and used it to finetune KoRAE for better performance.\nAs a result, we were able to filter the dataset 64k to 12k!\nThis 'KoRAE\\_filtered\\_12k' is the result of all previous processes.\n\n\nGithub Repository\n-----------------\n\n\nFor the more specific information, please check the following Repository\n\n\nThanks to @kyujinpy and @nlp-ai for providing Korean datasets.\n\n\n* KO-Platypus\n* Korean-OpenOrca" ]
fb66e737c712dbb7beeba0f821a1c32721bbc420
# Dataset Card for "check_other_demand_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
quocanh34/check_other_demand_dataset
[ "region:us" ]
2023-11-07T02:46:35+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 3888.0, "num_examples": 89}], "download_size": 3270, "dataset_size": 3888.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-07T02:46:36+00:00
[]
[]
TAGS #region-us
# Dataset Card for "check_other_demand_dataset" More Information needed
[ "# Dataset Card for \"check_other_demand_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"check_other_demand_dataset\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"check_other_demand_dataset\"\n\nMore Information needed" ]
1caa5930d543c3255f99d321849d702e231c72fd
# Dataset Card for "adGenPFH-dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
pfh1976/adGenPFH-dataset
[ "region:us" ]
2023-11-07T03:03:52+00:00
{"dataset_info": {"features": [{"name": "name", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "ad", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1243, "num_examples": 5}], "download_size": 3930, "dataset_size": 1243}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-07T03:03:53+00:00
[]
[]
TAGS #region-us
# Dataset Card for "adGenPFH-dataset" More Information needed
[ "# Dataset Card for \"adGenPFH-dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"adGenPFH-dataset\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"adGenPFH-dataset\"\n\nMore Information needed" ]
a220b2c79653950f837272b37cb8c3cf0a5bec70
# Dataset Card for "naver_news_1024" Preprocessed and tokenized [daekeun-ml/naver-news-summarization-ko](https://huggingface.co/datasets/daekeun-ml/naver-news-summarization-ko) dataset for pretraining. \ Concatenated all text and split into chunk size of **1024**. \ Used tokenizer from [beomi/llama-2-ko-7b](https://huggingface.co/beomi/llama-2-ko-7b) [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
lectura/naver_news_1024
[ "task_categories:text-generation", "size_categories:10K<n<100K", "source_datasets:daekeun-ml/naver-news-summarization-ko", "language:ko", "region:us" ]
2023-11-07T03:11:55+00:00
{"language": ["ko"], "size_categories": ["10K<n<100K"], "source_datasets": ["daekeun-ml/naver-news-summarization-ko"], "task_categories": ["text-generation"], "dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}], "splits": [{"name": "train", "num_bytes": 53074500, "num_examples": 12945}], "download_size": 23902205, "dataset_size": 53074500}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-12-01T02:00:49+00:00
[]
[ "ko" ]
TAGS #task_categories-text-generation #size_categories-10K<n<100K #source_datasets-daekeun-ml/naver-news-summarization-ko #language-Korean #region-us
# Dataset Card for "naver_news_1024" Preprocessed and tokenized daekeun-ml/naver-news-summarization-ko dataset for pretraining. \ Concatenated all text and split into chunk size of 1024. \ Used tokenizer from beomi/llama-2-ko-7b More Information needed
[ "# Dataset Card for \"naver_news_1024\"\n\nPreprocessed and tokenized daekeun-ml/naver-news-summarization-ko dataset for pretraining. \\\nConcatenated all text and split into chunk size of 1024. \\\nUsed tokenizer from beomi/llama-2-ko-7b\n\nMore Information needed" ]
[ "TAGS\n#task_categories-text-generation #size_categories-10K<n<100K #source_datasets-daekeun-ml/naver-news-summarization-ko #language-Korean #region-us \n", "# Dataset Card for \"naver_news_1024\"\n\nPreprocessed and tokenized daekeun-ml/naver-news-summarization-ko dataset for pretraining. \\\nConcatenated all text and split into chunk size of 1024. \\\nUsed tokenizer from beomi/llama-2-ko-7b\n\nMore Information needed" ]
[ 57, 81 ]
[ "passage: TAGS\n#task_categories-text-generation #size_categories-10K<n<100K #source_datasets-daekeun-ml/naver-news-summarization-ko #language-Korean #region-us \n# Dataset Card for \"naver_news_1024\"\n\nPreprocessed and tokenized daekeun-ml/naver-news-summarization-ko dataset for pretraining. \\\nConcatenated all text and split into chunk size of 1024. \\\nUsed tokenizer from beomi/llama-2-ko-7b\n\nMore Information needed" ]
55d05712533e775e9249f4f01eefbf5957b83b3f
# Dataset Card for Nexdata/Finnish_Spontaneous_Speech_Data ## Description The 233 Hours - Finnish Spontaneous Speech Data is a collection of speech clips covering multiple topics. All the speech audio was manually transcribed into text; speaker identity, gender, and other attributes are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research. For more details, please refer to the link: https://www.nexdata.ai/datasets/1248?source=Huggingface # Specifications ## Format 16kHz, 16bit, mono channel; ## Content category including conversations, courses, daily life, etc.; ## Language Finnish ## Annotation annotation of the transcription text, speaker identification and gender; ## Application scenarios speech recognition, video caption generation and video content review; ## Accuracy at a Word Accuracy Rate (WAR) of no less than 95%. # Licensing Information Commercial License
Nexdata/Finnish_Spontaneous_Speech_Data
[ "task_categories:automatic-speech-recognition", "language:fi", "region:us" ]
2023-11-07T03:17:40+00:00
{"language": ["fi"], "task_categories": ["automatic-speech-recognition"], "YAML tags": [{"copy-paste the tags obtained with the tagging app": "https://github.com/huggingface/datasets-tagging"}]}
2023-11-10T07:44:43+00:00
[]
[ "fi" ]
TAGS #task_categories-automatic-speech-recognition #language-Finnish #region-us
# Dataset Card for Nexdata/Finnish_Spontaneous_Speech_Data ## Description The 233 Hours - Finnish Spontaneous Speech Data is a collection of speech clips covering multiple topics. All the speech audio was manually transcribed into text; speaker identity, gender, and other attributes are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research. For more details, please refer to the link: URL # Specifications ## Format 16kHz, 16bit, mono channel; ## Content category including conversations, courses, daily life, etc.; ## Language Finnish ## Annotation annotation of the transcription text, speaker identification and gender; ## Application scenarios speech recognition, video caption generation and video content review; ## Accuracy at a Word Accuracy Rate (WAR) of no less than 95%. # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/Finnish_Spontaneous_Speech_Data", "## Description\nThe 233 Hours - Finnish Spontaneous Speech Data is a collection of speech clips, the content covering multiple topics. All the speech audio was manually transcribed into text; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, mono channel;", "## Content category\nincluding conversation, course, life, etc", "## Language\nFinnish", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Application scenarios\nspeech recognition, video caption generation and video content review;", "## Accuracy\nat a Word Accuracy Rate (WAR) of being no less than 95%.", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #language-Finnish #region-us \n", "# Dataset Card for Nexdata/Finnish_Spontaneous_Speech_Data", "## Description\nThe 233 Hours - Finnish Spontaneous Speech Data is a collection of speech clips, the content covering multiple topics. All the speech audio was manually transcribed into text; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, mono channel;", "## Content category\nincluding conversation, course, life, etc", "## Language\nFinnish", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Application scenarios\nspeech recognition, video caption generation and video content review;", "## Accuracy\nat a Word Accuracy Rate (WAR) of being no less than 95%.", "# Licensing Information\nCommercial License" ]
[ 28, 23, 94, 3, 12, 11, 4, 17, 15, 21, 9 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #language-Finnish #region-us \n# Dataset Card for Nexdata/Finnish_Spontaneous_Speech_Data## Description\nThe 233 Hours - Finnish Spontaneous Speech Data is a collection of speech clips, the content covering multiple topics. All the speech audio was manually transcribed into text; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL# Specifications## Format\n16kHz, 16bit, mono channel;## Content category\nincluding conversation, course, life, etc## Language\nFinnish## Annotation\nannotation for the transcription text, speaker identification, gender;## Application scenarios\nspeech recognition, video caption generation and video content review;## Accuracy\nat a Word Accuracy Rate (WAR) of being no less than 95%.# Licensing Information\nCommercial License" ]
1fc3df241fdbfda190d1e9c1d3a698c5d134e987
# Dataset Card for Nexdata/Urdu_Conversational_Speech_Data_by_Telephone ## Description The 196 Hours - Urdu Conversational Speech Data collected by telephone involved 270 native speakers and was developed with a proper gender balance. Speakers would choose a few familiar topics out of the given list and start conversations to ensure the dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification. For more details, please refer to the link: https://www.nexdata.ai/datasets/1242?source=Huggingface # Specifications ## Format 8kHz, 8bit, u-law/a-law pcm, mono channel; ## Recording Environment quiet indoor environment, without echo; ## Recording content dozens of topics are specified, and the speakers converse on those topics while the recording is performed; ## Demographics 270 speakers in total, with 56% male and 44% female. ## Annotation annotation of the transcription text, speaker identification and gender ## Device Telephony recording system; ## Language Urdu ## Application scenarios speech recognition; voiceprint recognition; ## Accuracy rate the word accuracy rate is not less than 95% # Licensing Information Commercial License
Nexdata/Urdu_Conversational_Speech_Data_by_Telephone
[ "task_categories:conversational", "language:ur", "region:us" ]
2023-11-07T03:26:13+00:00
{"language": ["ur"], "task_categories": ["conversational"]}
2023-11-10T07:43:37+00:00
[]
[ "ur" ]
TAGS #task_categories-conversational #language-Urdu #region-us
# Dataset Card for Nexdata/Urdu_Conversational_Speech_Data_by_Telephone ## Description The 196 Hours - Urdu Conversational Speech Data collected by telephone involved 270 native speakers and was developed with a proper gender balance. Speakers would choose a few familiar topics out of the given list and start conversations to ensure the dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification. For more details, please refer to the link: URL # Specifications ## Format 8kHz, 8bit, u-law/a-law pcm, mono channel; ## Recording Environment quiet indoor environment, without echo; ## Recording content dozens of topics are specified, and the speakers converse on those topics while the recording is performed; ## Demographics 270 speakers in total, with 56% male and 44% female. ## Annotation annotation of the transcription text, speaker identification and gender ## Device Telephony recording system; ## Language Urdu ## Application scenarios speech recognition; voiceprint recognition; ## Accuracy rate the word accuracy rate is not less than 95% # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/Urdu_Conversational_Speech_Data_by_Telephone", "## Description\nThe 196 Hours - Urdu Conversational Speech Data collected by telephone involved 270 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n8kHz, 8bit, u-law/a-law pcm, mono channel;", "## Recording Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n270 speakers totally, with 56% male and 44% female;.", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nTelephony recording system;", "## Language\nUrdu", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 95%", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-conversational #language-Urdu #region-us \n", "# Dataset Card for Nexdata/Urdu_Conversational_Speech_Data_by_Telephone", "## Description\nThe 196 Hours - Urdu Conversational Speech Data collected by telephone involved 270 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n8kHz, 8bit, u-law/a-law pcm, mono channel;", "## Recording Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n270 speakers totally, with 56% male and 44% female;.", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nTelephony recording system;", "## Language\nUrdu", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 95%", "# Licensing Information\nCommercial License" ]
[ 21, 26, 140, 3, 22, 16, 30, 19, 17, 8, 3, 11, 16, 9 ]
[ "passage: TAGS\n#task_categories-conversational #language-Urdu #region-us \n# Dataset Card for Nexdata/Urdu_Conversational_Speech_Data_by_Telephone## Description\nThe 196 Hours - Urdu Conversational Speech Data collected by telephone involved 270 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\nFor more details, please refer to the link: URL# Specifications## Format\n8kHz, 8bit, u-law/a-law pcm, mono channel;## Recording Environment\nquiet indoor environment, without echo;## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;## Demographics\n270 speakers totally, with 56% male and 44% female;.## Annotation\nannotating for the transcription text, speaker identification and gender## Device\nTelephony recording system;## Language\nUrdu## Application scenarios\nspeech recognition; voiceprint recognition;## Accuracy rate\nthe word accuracy rate is not less than 95%# Licensing Information\nCommercial License" ]
6b6f3bbcd453002449956ec42aaf9a2d39ef8635
# Dataset Card for Nexdata/Pushtu_Conversational_Speech_Data_by_Telephone ## Description The 200 Hours - Pushtu Conversational Speech Data collected by telephone involved more than 230 native speakers and was developed with a proper gender balance. Speakers would choose a few familiar topics out of the given list and start conversations to ensure the dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification. For more details, please refer to the link: https://www.nexdata.ai/datasets/1248?source=Huggingface # Specifications ## Format 8kHz, 8bit, wav, mono channel; ## Recording Environment quiet indoor environment, without echo; ## Recording content dozens of topics are specified, and the speakers converse on those topics while the recording is performed; ## Demographics About 230 people. ## Annotation annotation of the transcription text, speaker identification and gender ## Device Telephony recording system; ## Language Pushtu ## Application scenarios speech recognition; voiceprint recognition; ## Accuracy rate the word accuracy rate is not less than 95% # Licensing Information Commercial License
Nexdata/Pushtu_Conversational_Speech_Data_by_Telephone
[ "task_categories:conversational", "task_categories:automatic-speech-recognition", "language:ps", "region:us" ]
2023-11-07T03:29:53+00:00
{"language": ["ps"], "task_categories": ["conversational", "automatic-speech-recognition"]}
2023-11-22T09:36:27+00:00
[]
[ "ps" ]
TAGS #task_categories-conversational #task_categories-automatic-speech-recognition #language-Pushto #region-us
# Dataset Card for Nexdata/Pushtu_Conversational_Speech_Data_by_Telephone ## Description The 200 Hours - Pushtu Conversational Speech Data collected by telephone involved more than 230 native speakers and was developed with a proper gender balance. Speakers would choose a few familiar topics out of the given list and start conversations to ensure the dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification. For more details, please refer to the link: URL # Specifications ## Format 8kHz, 8bit, wav, mono channel; ## Recording Environment quiet indoor environment, without echo; ## Recording content dozens of topics are specified, and the speakers converse on those topics while the recording is performed; ## Demographics About 230 people. ## Annotation annotation of the transcription text, speaker identification and gender ## Device Telephony recording system; ## Language Pushtu ## Application scenarios speech recognition; voiceprint recognition; ## Accuracy rate the word accuracy rate is not less than 95% # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/Pushtu_Conversational_Speech_Data_by_Telephone", "## Description\nThe 200 Hours - Pushtu Conversational Speech Data collected by telephone involved more than 230 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n8kHz, 8bit, wav, mono channel;", "## Recording Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\nAbout 230 people.", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nTelephony recording system;", "## Language\nPushtu", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 95%", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-conversational #task_categories-automatic-speech-recognition #language-Pushto #region-us \n", "# Dataset Card for Nexdata/Pushtu_Conversational_Speech_Data_by_Telephone", "## Description\nThe 200 Hours - Pushtu Conversational Speech Data collected by telephone involved more than 230 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n8kHz, 8bit, wav, mono channel;", "## Recording Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\nAbout 230 people.", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nTelephony recording system;", "## Language\nPushtu", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 95%", "# Licensing Information\nCommercial License" ]
[ 38, 27, 143, 3, 15, 16, 30, 8, 17, 8, 4, 11, 16, 9 ]
[ "passage: TAGS\n#task_categories-conversational #task_categories-automatic-speech-recognition #language-Pushto #region-us \n# Dataset Card for Nexdata/Pushtu_Conversational_Speech_Data_by_Telephone## Description\nThe 200 Hours - Pushtu Conversational Speech Data collected by telephone involved more than 230 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL# Specifications## Format\n8kHz, 8bit, wav, mono channel;## Recording Environment\nquiet indoor environment, without echo;## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;## Demographics\nAbout 230 people.## Annotation\nannotating for the transcription text, speaker identification and gender## Device\nTelephony recording system;## Language\nPushtu## Application scenarios\nspeech recognition; voiceprint recognition;## Accuracy rate\nthe word accuracy rate is not less than 95%# Licensing Information\nCommercial License" ]
37ddc6e8b564389a32997e9a25139ad444ed94cc
# MultiSpider: Towards Benchmarking Multilingual Text-to-SQL Semantic Parsing In this work, we present MultiSpider, a multilingual text-to-SQL dataset which covers seven languages (English, German, French, Spanish, Japanese, Chinese, and Vietnamese). Find more details in the [paper](https://arxiv.org/pdf/2212.13492.pdf) and [code](https://github.com/longxudou/multispider). Please be aware that the MultiSpider dataset is available in two versions: `with_English_value` and `with_original_value`. Our reported results are based on the `with_English_value` version to circumvent any discrepancies between the entities in the questions and the values in the database. The `with_original_value` version is a byproduct of the dataset creation process, which may be of interest for more in-depth research on this localized dataset. `with_English_value`: Führen Sie die Namen der Sängerinnen und Sänger auf, deren Staatsbürgerschaft nicht „France“ lautet. `with_original_value`: Führen Sie die Namen der Sängerinnen und Sänger auf, deren Staatsbürgerschaft nicht "Frankreich" lautet. ## Results | Model | EN | DE | ES | FR | JA | ZH | VI | | ------ | ----: | ----: | ----: | ----: | ----: | ----: | ----: | | Paper Report | 68.8 | 64.8 | 67.4 | 65.3 | 60.2 | 66.1 | 67.1 | | Released Model | 69.5 | 65.1 | 68.1 | 66.7 | 60.9 | 67.4 | 69.1 | ## Copyright Except where stated explicitly otherwise, the copyright to the source code is licensed under the Creative Commons - Attribution-NonCommercial 4.0 International license (CC BY-NC 4.0): https://creativecommons.org/licenses/by-nc/4.0/. Any commercial use (whether for the benefit of third parties or internally in production) requires an explicit license.
dreamerdeo/multispider
[ "size_categories:10K<n<100K", "language:en", "language:fr", "language:de", "language:vi", "language:zh", "language:ja", "language:es", "license:cc", "arxiv:2212.13492", "region:us" ]
2023-11-07T03:39:17+00:00
{"language": ["en", "fr", "de", "vi", "zh", "ja", "es"], "license": "cc", "size_categories": ["10K<n<100K"]}
2023-11-07T13:04:03+00:00
[ "2212.13492" ]
[ "en", "fr", "de", "vi", "zh", "ja", "es" ]
TAGS #size_categories-10K<n<100K #language-English #language-French #language-German #language-Vietnamese #language-Chinese #language-Japanese #language-Spanish #license-cc #arxiv-2212.13492 #region-us
MultiSpider: Towards Benchmarking Multilingual Text-to-SQL Semantic Parsing =========================================================================== In this work, we present MultiSpider, a multilingual text-to-SQL dataset which covers seven languages (English, German, French, Spanish, Japanese, Chinese, and Vietnamese). Find more details in the paper and code. Please be aware that the MultiSpider dataset is available in two versions: 'with\_English\_value' and 'with\_original\_value'. Our reported results are based on the 'with\_English\_value' version to circumvent any discrepancies between the entities in the questions and the values in the database. The 'with\_original\_value' version is a byproduct of the dataset creation process, which may be of interest for more in-depth research on this localized dataset. 'with\_English\_value': Führen Sie die Namen der Sängerinnen und Sänger auf, deren Staatsbürgerschaft nicht „France“ lautet. 'with\_original\_value': Führen Sie die Namen der Sängerinnen und Sänger auf, deren Staatsbürgerschaft nicht "Frankreich" lautet. Results ------- Copyright --------- Except where stated explicitly otherwise, the copyright to the source code is licensed under the Creative Commons - Attribution-NonCommercial 4.0 International license (CC BY-NC 4.0): URL Any commercial use (whether for the benefit of third parties or internally in production) requires an explicit license.
[]
[ "TAGS\n#size_categories-10K<n<100K #language-English #language-French #language-German #language-Vietnamese #language-Chinese #language-Japanese #language-Spanish #license-cc #arxiv-2212.13492 #region-us \n" ]
[ 69 ]
[ "passage: TAGS\n#size_categories-10K<n<100K #language-English #language-French #language-German #language-Vietnamese #language-Chinese #language-Japanese #language-Spanish #license-cc #arxiv-2212.13492 #region-us \n" ]
135a387b39ea59203482101ba99b9cf2379b5b67
# Anime style image - text by GPT4V small dataset ![cute1.png](cute1.png) ## The text is as follows: This is a charming anime-style illustration featuring a young girl as the main subject. The image predominantly uses a soft, pastel color palette, creating a gentle and whimsical ambiance. The main character has light blonde hair styled in two low twintails, secured with what could be interpreted as dark-colored hair ties or ribbons. She has large expressive blue eyes and a demure expression, with her mouth slightly open as if she is about to speak or is quietly admiring something. A black hairband is perched on top of her head. She is dressed in an outfit that radiates a youthful, almost springtime elegance. She wears a long-sleeved white coat, with the sleeves rolled up to just below the elbow, revealing a light green dress with a floral hem design underneath. The dress itself is a rich, green color with a subtle texture that suggests a fabric like cotton or linen. It is accented with small white, yellow-centered flowers near the hem, which also features a ruffled fringe hinting at layers beneath. Around her neck, she has a thin, green scarf or kerchief, and her feet are adorned with sturdy black boots with brown soles and notable detailing, including black laces tied in neat bows. In her right hand, the girl holds a glass of what appears to be a cold, whipped cream-topped beverage, the kind typically found at a cafe. On her left, she gently cradles a triangular-shaped pastry, possibly a slice of pie or cake, on a small, simple plate. To her right, the image shows a smaller rendition of the girl in a similar pose but without food or drink, emphasizing her adorable and innocent demeanor. Additionally, there are two cute white rabbits in the image, one sitting directly in front of the girl and the other to her left. The rabbit in front wears a collar with a bell, hinting at it being a pet. The one on the left appears to be free and unadorned. Both rabbits have their attention directed towards the girl, further amplifying the sweetness and serene nature of the scene. Leaf motifs and plant elements are scattered throughout the image, further establishing the connection to nature and spring. The overall composition is bordered by a teal background, which contrasts with the lighter colors and helps the central elements to stand out. The backdrop features subtle watercolor effects, adding texture and visual interest. Lastly, text elements on the image read "MatsoTie, Mity Litite, Ianoiynote," and "magnolia kat," likely representing illustrative or fictional branding and the artist's signature, respectively. The chosen font for the main text is elegant and simple, maintaining the gentle aesthetics of the artwork. ## format - cute1.png+cute1.txt - [llava.json](llava.json) - [metadata.csv](metadata.csv) Thanks to https://huggingface.co/datasets/p1atdev/niji-v5 . ## Restriction You may not develop models that compete with OpenAI because of [OpenAI's terms of use](https://openai.com/policies/terms-of-use).
alfredplpl/anime-with-gpt4v-caption-for-lora
[ "language:en", "license:cc-by-nc-4.0", "region:us" ]
2023-11-07T04:48:18+00:00
{"language": ["en"], "license": "cc-by-nc-4.0"}
2023-11-28T08:16:42+00:00
[]
[ "en" ]
TAGS #language-English #license-cc-by-nc-4.0 #region-us
# Anime style image - text by GPT4V small dataset !URL ## The text is as follows: This is a charming anime-style illustration featuring a young girl as the main subject. The image predominantly uses a soft, pastel color palette, creating a gentle and whimsical ambiance. The main character has light blonde hair styled in two low twintails, secured with what could be interpreted as dark-colored hair ties or ribbons. She has large expressive blue eyes and a demure expression, with her mouth slightly open as if she is about to speak or is quietly admiring something. A black hairband is perched on top of her head. She is dressed in an outfit that radiates a youthful, almost springtime elegance. She wears a long-sleeved white coat, with the sleeves rolled up to just below the elbow, revealing a light green dress with a floral hem design underneath. The dress itself is a rich, green color with a subtle texture that suggests a fabric like cotton or linen. It is accented with small white, yellow-centered flowers near the hem, which also features a ruffled fringe hinting at layers beneath. Around her neck, she has a thin, green scarf or kerchief, and her feet are adorned with sturdy black boots with brown soles and notable detailing, including black laces tied in neat bows. In her right hand, the girl holds a glass of what appears to be a cold, whipped cream-topped beverage, the kind typically found at a cafe. On her left, she gently cradles a triangular-shaped pastry, possibly a slice of pie or cake, on a small, simple plate. To her right, the image shows a smaller rendition of the girl in a similar pose but without food or drink, emphasizing her adorable and innocent demeanor. Additionally, there are two cute white rabbits in the image, one sitting directly in front of the girl and the other to her left. The rabbit in front wears a collar with a bell, hinting at it being a pet. The one on the left appears to be free and unadorned. Both rabbits have their attention directed towards the girl, further amplifying the sweetness and serene nature of the scene. Leaf motifs and plant elements are scattered throughout the image, further establishing the connection to nature and spring. The overall composition is bordered by a teal background, which contrasts with the lighter colors and helps the central elements to stand out. The backdrop features subtle watercolor effects, adding texture and visual interest. Lastly, text elements on the image read "MatsoTie, Mity Litite, Ianoiynote," and "magnolia kat," likely representing illustrative or fictional branding and the artist's signature, respectively. The chosen font for the main text is elegant and simple, maintaining the gentle aesthetics of the artwork. ## format - URL+URL - URL - URL Thanks to URL . ## Restriction You may not develop models that compete with OpenAI because of OpenAI's terms of use.
[ "# Anime style image - text by GPT4V small dataset\n\n!URL", "## The text is as follows:\n\nThis is a charming anime-style illustration featuring a young girl as the main subject. The image predominantly uses a soft, pastel color palette, creating a gentle and whimsical ambiance. The main character has light blonde hair styled in two low twintails, secured with what could be interpreted as dark-colored hair ties or ribbons. She has large expressive blue eyes and a demure expression, with her mouth slightly open as if she is about to speak or is quietly admiring something. A black hairband is perched on top of her head. She is dressed in an outfit that radiates a youthful, almost springtime elegance. She wears a long-sleeved white coat, with the sleeves rolled up to just below the elbow, revealing a light green dress with a floral hem design underneath. The dress itself is a rich, green color with a subtle texture that suggests a fabric like cotton or linen. It is accented with small white, yellow-centered flowers near the hem, which also features a ruffled fringe hinting at layers beneath. Around her neck, she has a thin, green scarf or kerchief, and her feet are adorned with sturdy black boots with brown soles and notable detailing, including black laces tied in neat bows. In her right hand, the girl holds a glass of what appears to be a cold, whipped cream-topped beverage, the kind typically found at a cafe. On her left, she gently cradles a triangular-shaped pastry, possibly a slice of pie or cake, on a small, simple plate. To her right, the image shows a smaller rendition of the girl in a similar pose but without food or drink, emphasizing her adorable and innocent demeanor. Additionally, there are two cute white rabbits in the image, one sitting directly in front of the girl and the other to her left. The rabbit in front wears a collar with a bell, hinting at it being a pet. The one on the left appears to be free and unadorned. Both rabbits have their attention directed towards the girl, further amplifying the sweetness and serene nature of the scene. Leaf motifs and plant elements are scattered throughout the image, further establishing the connection to nature and spring. The overall composition is bordered by a teal background, which contrasts with the lighter colors and helps the central elements to stand out. The backdrop features subtle watercolor-effects, adding texture and visual interest. Lastly, text elements on the image read \"MatsoTie, Mity Litite, Ianoiynote,\" and \"magnolia kat,\" likely representing illustrative or fictional branding and the artist's signature, respectively. The chosen font for the main text is elegant and simple, maintaining the gentle aesthetics of the artwork.", "## format\n- URL+URL\n- URL\n- URL\n\nThanks URL .", "## Restriction\nYou may not develop models that compete with OpenAI because of OpenAI's terms of use." ]
[ "TAGS\n#language-English #license-cc-by-nc-4.0 #region-us \n", "# Anime style image - text by GPT4V small dataset\n\n!URL", "## The text is as follows:\n\nThis is a charming anime-style illustration featuring a young girl as the main subject. The image predominantly uses a soft, pastel color palette, creating a gentle and whimsical ambiance. The main character has light blonde hair styled in two low twintails, secured with what could be interpreted as dark-colored hair ties or ribbons. She has large expressive blue eyes and a demure expression, with her mouth slightly open as if she is about to speak or is quietly admiring something. A black hairband is perched on top of her head. She is dressed in an outfit that radiates a youthful, almost springtime elegance. She wears a long-sleeved white coat, with the sleeves rolled up to just below the elbow, revealing a light green dress with a floral hem design underneath. The dress itself is a rich, green color with a subtle texture that suggests a fabric like cotton or linen. It is accented with small white, yellow-centered flowers near the hem, which also features a ruffled fringe hinting at layers beneath. Around her neck, she has a thin, green scarf or kerchief, and her feet are adorned with sturdy black boots with brown soles and notable detailing, including black laces tied in neat bows. In her right hand, the girl holds a glass of what appears to be a cold, whipped cream-topped beverage, the kind typically found at a cafe. On her left, she gently cradles a triangular-shaped pastry, possibly a slice of pie or cake, on a small, simple plate. To her right, the image shows a smaller rendition of the girl in a similar pose but without food or drink, emphasizing her adorable and innocent demeanor. Additionally, there are two cute white rabbits in the image, one sitting directly in front of the girl and the other to her left. The rabbit in front wears a collar with a bell, hinting at it being a pet. The one on the left appears to be free and unadorned. Both rabbits have their attention directed towards the girl, further amplifying the sweetness and serene nature of the scene. Leaf motifs and plant elements are scattered throughout the image, further establishing the connection to nature and spring. The overall composition is bordered by a teal background, which contrasts with the lighter colors and helps the central elements to stand out. The backdrop features subtle watercolor-effects, adding texture and visual interest. Lastly, text elements on the image read \"MatsoTie, Mity Litite, Ianoiynote,\" and \"magnolia kat,\" likely representing illustrative or fictional branding and the artist's signature, respectively. The chosen font for the main text is elegant and simple, maintaining the gentle aesthetics of the artwork.", "## format\n- URL+URL\n- URL\n- URL\n\nThanks URL .", "## Restriction\nYou may not develop models that compete with OpenAI because of OpenAI's terms of use." ]
[ 21, 16, 666, 14, 24 ]
[ "passage: TAGS\n#language-English #license-cc-by-nc-4.0 #region-us \n# Anime style image - text by GPT4V small dataset\n\n!URL" ]
5b551a5d2d0b3a930b3bd792122765c583b9cb9d
# Dataset Card for "guanaco-llama2-200" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
prashantpansare/guanaco-llama2-200
[ "region:us" ]
2023-11-07T05:15:36+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 338808, "num_examples": 200}], "download_size": 201257, "dataset_size": 338808}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-07T05:15:55+00:00
[]
[]
TAGS #region-us
# Dataset Card for "guanaco-llama2-200" More Information needed
[ "# Dataset Card for \"guanaco-llama2-200\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"guanaco-llama2-200\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"guanaco-llama2-200\"\n\nMore Information needed" ]
1832e82482dc4f823d641b83c7e42b3ef2b6cd74
# Dataset Card for "KorQuAD_2.0" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
leeseeun/KorQuAD_2.0
[ "region:us" ]
2023-11-07T05:42:38+00:00
{"dataset_info": {"features": [{"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 48148796, "num_examples": 83486}], "download_size": 29849379, "dataset_size": 48148796}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-07T05:45:49+00:00
[]
[]
TAGS #region-us
# Dataset Card for "KorQuAD_2.0" More Information needed
[ "# Dataset Card for \"KorQuAD_2.0\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"KorQuAD_2.0\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"KorQuAD_2.0\"\n\nMore Information needed" ]
799ad9b3f4004ef450a715fee3c09c461ba7a7f1
# Dataset Card for "mywitch3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
beyonddata/mywitch3
[ "region:us" ]
2023-11-07T05:51:08+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "cap", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 500880.0, "num_examples": 11}], "download_size": 501794, "dataset_size": 500880.0}}
2023-11-07T05:51:18+00:00
[]
[]
TAGS #region-us
# Dataset Card for "mywitch3" More Information needed
[ "# Dataset Card for \"mywitch3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"mywitch3\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"mywitch3\"\n\nMore Information needed" ]
c433b9e0a91c3fbeb2c19099b8ac62928958cc3b
# Dataset Card for "JimmyLuAugSeq" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
bigheiniuJ/JimmyLuAugSeq
[ "region:us" ]
2023-11-07T06:05:03+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "output", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "seed", "dtype": "string"}, {"name": "split", "dtype": "string"}, {"name": "task", "dtype": "string"}, {"name": "options", "sequence": "string"}, {"name": "id", "dtype": "int64"}, {"name": "aug_type", "dtype": "string"}, {"name": "aug_time", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 3560026221, "num_examples": 6653232}], "download_size": 743700150, "dataset_size": 3560026221}}
2023-11-07T06:07:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "JimmyLuAugSeq" More Information needed
[ "# Dataset Card for \"JimmyLuAugSeq\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"JimmyLuAugSeq\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"JimmyLuAugSeq\"\n\nMore Information needed" ]
6beb0279dc1b1c97e0cdc617b16271d12e004653
# Dataset Card for "MultiCite-classification-gold-context" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kejian/MultiCite-classification-gold-context
[ "region:us" ]
2023-11-07T06:22:43+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "x", "dtype": "string"}, {"name": "y", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1644148, "num_examples": 5491}, {"name": "test", "num_bytes": 971485, "num_examples": 3313}, {"name": "validation", "num_bytes": 761342, "num_examples": 2447}], "download_size": 1478485, "dataset_size": 3376975}}
2023-11-07T06:22:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "MultiCite-classification-gold-context" More Information needed
[ "# Dataset Card for \"MultiCite-classification-gold-context\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"MultiCite-classification-gold-context\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"MultiCite-classification-gold-context\"\n\nMore Information needed" ]
320c709624b60135010de6cf44a15281910efc86
# Dataset Card for "law" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ``` DatasetDict({ law: DatasetDict({ train: DatasetDict({ raw: Dataset({ features: ['info', 'concerned', 'org', 'disposal', 'mentionedItems', 'assrs', 'facts', 'dcss', 'close'], num_rows: 107 }) label: Dataset({ features: ['info', 'concerned', 'org', 'disposal', 'mentionedItems', 'assrs', 'facts', 'dcss', 'close'], num_rows: 107 }) }) validation: DatasetDict({ raw: Dataset({ features: ['info', 'concerned', 'org', 'disposal', 'mentionedItems', 'assrs', 'facts', 'dcss', 'close'], num_rows: 1000 }) label: Dataset({ features: ['info', 'concerned', 'org', 'disposal', 'mentionedItems', 'assrs', 'facts', 'dcss', 'close'], num_rows: 1000 }) }) }) term: DatasetDict({ train: Dataset({ features: ['category', 'name', 'cn'], num_rows: 7847 }) validation: Dataset({ features: ['category', 'name', 'cn'], num_rows: 986 }) }) }) ```
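Given the nested structure above, a minimal loading sketch may help; it is hedged, since the config strings ("term/train", "law/train") and the raw/label split names are taken from this record's metadata rather than from author documentation:

```python
# Minimal sketch: load the glossary and case-law configs of brainer/law-term.
# Assumes the Hugging Face `datasets` library; config/split names come from
# the dataset metadata and are not separately documented by the author.
from datasets import load_dataset

# Term glossary: category / name / cn columns.
terms = load_dataset("brainer/law-term", "term/train", split="train")
print(terms[0]["category"], terms[0]["name"])

# Case law: exposes "raw" and "label" splits instead of train/validation.
cases = load_dataset("brainer/law-term", "law/train", split="raw")
print(cases[0]["info"]["caseNo"])
```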
brainer/law-term
[ "region:us" ]
2023-11-07T06:23:57+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "raw", "path": "data/raw-*"}, {"split": "label", "path": "data/label-*"}]}, {"config_name": "law/train", "data_files": [{"split": "raw", "path": "law/train/raw-*"}, {"split": "label", "path": "law/train/label-*"}]}, {"config_name": "law/validation", "data_files": [{"split": "raw", "path": "law/validation/raw-*"}, {"split": "label", "path": "law/validation/label-*"}]}, {"config_name": "term/train", "data_files": [{"split": "train", "path": "term/train/train-*"}]}, {"config_name": "term/validation", "data_files": [{"split": "train", "path": "term/validation/train-*"}]}, {"config_name": "train", "data_files": [{"split": "raw", "path": "train/raw-*"}, {"split": "label", "path": "train/label-*"}]}, {"config_name": "validation", "data_files": [{"split": "raw", "path": "validation/raw-*"}, {"split": "label", "path": "validation/label-*"}]}], "dataset_info": [{"config_name": "default", "features": [{"name": "info", "struct": [{"name": "caseField", "dtype": "string"}, {"name": "caseNm", "dtype": "string"}, {"name": "caseNo", "dtype": "string"}, {"name": "courtNm", "dtype": "string"}, {"name": "detailField", "dtype": "string"}, {"name": "judmnAdjuDe", "dtype": "string"}, {"name": "qotatPrcdnt", "sequence": "string"}, {"name": "relateLaword", "sequence": "string"}, {"name": "trailField", "dtype": "string"}]}, {"name": "concerned", "struct": [{"name": "acusr", "dtype": "string"}, {"name": "dedat", "dtype": "string"}]}, {"name": "org", "struct": [{"name": "orgJdgmnAdjuDe", "dtype": "string"}, {"name": "orgJdgmnCaseNo", "dtype": "string"}, {"name": "orgJdgmnCourtNm", "dtype": "string"}]}, {"name": "disposal", "struct": [{"name": "disposalcontent", "sequence": "string"}, {"name": "disposalform", "dtype": "string"}]}, {"name": "mentionedItems", "struct": [{"name": "rqestObjet", "sequence": "string"}]}, {"name": "assrs", "struct": [{"name": "acusrAssrs", "sequence": "string"}, {"name": "dedatAssrs", "sequence": "string"}]}, {"name": "facts", "struct": [{"name": "bsisFacts", "sequence": "string"}]}, {"name": "dcss", "struct": [{"name": "courtDcss", "sequence": "string"}]}, {"name": "close", "struct": [{"name": "cnclsns", "sequence": "string"}]}], "splits": [{"name": "raw", "num_bytes": 10516539, "num_examples": 1000}, {"name": "label", "num_bytes": 10516539, "num_examples": 1000}], "download_size": 9908098, "dataset_size": 21033078}, {"config_name": "law/train", "features": [{"name": "info", "struct": [{"name": "caseField", "dtype": "string"}, {"name": "caseNm", "dtype": "string"}, {"name": "caseNo", "dtype": "string"}, {"name": "courtNm", "dtype": "string"}, {"name": "detailField", "dtype": "string"}, {"name": "judmnAdjuDe", "dtype": "string"}, {"name": "qotatPrcdnt", "sequence": "string"}, {"name": "relateLaword", "sequence": "string"}, {"name": "trailField", "dtype": "string"}]}, {"name": "concerned", "struct": [{"name": "acusr", "dtype": "string"}, {"name": "dedat", "dtype": "string"}]}, {"name": "org", "struct": [{"name": "orgJdgmnAdjuDe", "dtype": "string"}, {"name": "orgJdgmnCaseNo", "dtype": "string"}, {"name": "orgJdgmnCourtNm", "dtype": "string"}]}, {"name": "disposal", "struct": [{"name": "disposalcontent", "sequence": "string"}, {"name": "disposalform", "dtype": "string"}]}, {"name": "mentionedItems", "struct": [{"name": "rqestObjet", "sequence": "string"}]}, {"name": "assrs", "struct": [{"name": "acusrAssrs", "sequence": "string"}, {"name": "dedatAssrs", "sequence": "string"}]}, {"name": "facts", "struct": [{"name": "bsisFacts", 
"sequence": "string"}]}, {"name": "dcss", "struct": [{"name": "courtDcss", "sequence": "string"}]}, {"name": "close", "struct": [{"name": "cnclsns", "sequence": "string"}]}], "splits": [{"name": "raw", "num_bytes": 1245935, "num_examples": 107}, {"name": "label", "num_bytes": 1245935, "num_examples": 107}], "download_size": 1217908, "dataset_size": 2491870}, {"config_name": "law/validation", "features": [{"name": "info", "struct": [{"name": "caseField", "dtype": "string"}, {"name": "caseNm", "dtype": "string"}, {"name": "caseNo", "dtype": "string"}, {"name": "courtNm", "dtype": "string"}, {"name": "detailField", "dtype": "string"}, {"name": "judmnAdjuDe", "dtype": "string"}, {"name": "qotatPrcdnt", "sequence": "string"}, {"name": "relateLaword", "sequence": "string"}, {"name": "trailField", "dtype": "string"}]}, {"name": "concerned", "struct": [{"name": "acusr", "dtype": "string"}, {"name": "dedat", "dtype": "string"}]}, {"name": "org", "struct": [{"name": "orgJdgmnAdjuDe", "dtype": "string"}, {"name": "orgJdgmnCaseNo", "dtype": "string"}, {"name": "orgJdgmnCourtNm", "dtype": "string"}]}, {"name": "disposal", "struct": [{"name": "disposalcontent", "sequence": "string"}, {"name": "disposalform", "dtype": "string"}]}, {"name": "mentionedItems", "struct": [{"name": "rqestObjet", "sequence": "string"}]}, {"name": "assrs", "struct": [{"name": "acusrAssrs", "sequence": "string"}, {"name": "dedatAssrs", "sequence": "string"}]}, {"name": "facts", "struct": [{"name": "bsisFacts", "sequence": "string"}]}, {"name": "dcss", "struct": [{"name": "courtDcss", "sequence": "string"}]}, {"name": "close", "struct": [{"name": "cnclsns", "sequence": "string"}]}], "splits": [{"name": "raw", "num_bytes": 10516539, "num_examples": 1000}, {"name": "label", "num_bytes": 10516539, "num_examples": 1000}], "download_size": 9908098, "dataset_size": 21033078}, {"config_name": "term/train", "features": [{"name": "category", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "cn", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 186613726, "num_examples": 7847}], "download_size": 82250437, "dataset_size": 186613726}, {"config_name": "term/validation", "features": [{"name": "category", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "cn", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 19944422, "num_examples": 986}], "download_size": 8955609, "dataset_size": 19944422}, {"config_name": "train", "features": [{"name": "info", "struct": [{"name": "caseField", "dtype": "string"}, {"name": "caseNm", "dtype": "string"}, {"name": "caseNo", "dtype": "string"}, {"name": "courtNm", "dtype": "string"}, {"name": "detailField", "dtype": "string"}, {"name": "judmnAdjuDe", "dtype": "string"}, {"name": "qotatPrcdnt", "sequence": "string"}, {"name": "relateLaword", "sequence": "string"}, {"name": "trailField", "dtype": "string"}]}, {"name": "concerned", "struct": [{"name": "acusr", "dtype": "string"}, {"name": "dedat", "dtype": "string"}]}, {"name": "org", "struct": [{"name": "orgJdgmnAdjuDe", "dtype": "string"}, {"name": "orgJdgmnCaseNo", "dtype": "string"}, {"name": "orgJdgmnCourtNm", "dtype": "string"}]}, {"name": "disposal", "struct": [{"name": "disposalcontent", "sequence": "string"}, {"name": "disposalform", "dtype": "string"}]}, {"name": "mentionedItems", "struct": [{"name": "rqestObjet", "sequence": "string"}]}, {"name": "assrs", "struct": [{"name": "acusrAssrs", "sequence": "string"}, {"name": "dedatAssrs", "sequence": "string"}]}, {"name": "facts", "struct": [{"name": 
"bsisFacts", "sequence": "string"}]}, {"name": "dcss", "struct": [{"name": "courtDcss", "sequence": "string"}]}, {"name": "close", "struct": [{"name": "cnclsns", "sequence": "string"}]}], "splits": [{"name": "raw", "num_bytes": 1245935, "num_examples": 107}, {"name": "label", "num_bytes": 1245935, "num_examples": 107}], "download_size": 1217908, "dataset_size": 2491870}, {"config_name": "validation", "features": [{"name": "info", "struct": [{"name": "caseField", "dtype": "string"}, {"name": "caseNm", "dtype": "string"}, {"name": "caseNo", "dtype": "string"}, {"name": "courtNm", "dtype": "string"}, {"name": "detailField", "dtype": "string"}, {"name": "judmnAdjuDe", "dtype": "string"}, {"name": "qotatPrcdnt", "sequence": "string"}, {"name": "relateLaword", "sequence": "string"}, {"name": "trailField", "dtype": "string"}]}, {"name": "concerned", "struct": [{"name": "acusr", "dtype": "string"}, {"name": "dedat", "dtype": "string"}]}, {"name": "org", "struct": [{"name": "orgJdgmnAdjuDe", "dtype": "string"}, {"name": "orgJdgmnCaseNo", "dtype": "string"}, {"name": "orgJdgmnCourtNm", "dtype": "string"}]}, {"name": "disposal", "struct": [{"name": "disposalcontent", "sequence": "string"}, {"name": "disposalform", "dtype": "string"}]}, {"name": "mentionedItems", "struct": [{"name": "rqestObjet", "sequence": "string"}]}, {"name": "assrs", "struct": [{"name": "acusrAssrs", "sequence": "string"}, {"name": "dedatAssrs", "sequence": "string"}]}, {"name": "facts", "struct": [{"name": "bsisFacts", "sequence": "string"}]}, {"name": "dcss", "struct": [{"name": "courtDcss", "sequence": "string"}]}, {"name": "close", "struct": [{"name": "cnclsns", "sequence": "string"}]}], "splits": [{"name": "raw", "num_bytes": 10516539, "num_examples": 1000}, {"name": "label", "num_bytes": 10516539, "num_examples": 1000}], "download_size": 9908098, "dataset_size": 21033078}]}
2023-11-07T07:30:35+00:00
[]
[]
TAGS #region-us
# Dataset Card for "law" More Information needed
[ "# Dataset Card for \"law\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"law\"\n\nMore Information needed" ]
[ 6, 11 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"law\"\n\nMore Information needed" ]
051afbefe4bed7ad3aa6136b656ca3f3c4b42891
# Dataset Card for "chemnlp-orbnet-denali" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kjappelbaum/chemnlp-orbnet-denali
[ "region:us" ]
2023-11-07T06:41:09+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "smiles", "dtype": "string"}, {"name": "xyz", "dtype": "string"}, {"name": "mol2000", "dtype": "string"}, {"name": "mol3000", "dtype": "string"}, {"name": "charge", "dtype": "int64"}, {"name": "dft_energy", "dtype": "float64"}, {"name": "xtb1_energy", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 6436401032, "num_examples": 1053275}], "download_size": 2534938845, "dataset_size": 6436401032}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-07T07:18:26+00:00
[]
[]
TAGS #region-us
# Dataset Card for "chemnlp-orbnet-denali" More Information needed
[ "# Dataset Card for \"chemnlp-orbnet-denali\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"chemnlp-orbnet-denali\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"chemnlp-orbnet-denali\"\n\nMore Information needed" ]
656240134982d7a845017dd78f74f82c9a518eb2
# Dataset Card for "PMUL4976_only_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Brendan/PMUL4976_only_dataset
[ "region:us" ]
2023-11-07T07:03:29+00:00
{"dataset_info": {"features": [{"name": "dialogue_id", "dtype": "string"}, {"name": "turn_id", "dtype": "int8"}, {"name": "domains", "sequence": "string"}, {"name": "system_utterances", "sequence": "string"}, {"name": "user_utterances", "sequence": "string"}, {"name": "slot_values", "struct": [{"name": "hotel", "struct": [{"name": "price range", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "parking", "dtype": "string"}, {"name": "book day", "dtype": "string"}, {"name": "book people", "dtype": "string"}, {"name": "book stay", "dtype": "string"}, {"name": "stars", "dtype": "string"}, {"name": "internet", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "area", "dtype": "string"}]}, {"name": "train", "struct": [{"name": "arrive by", "dtype": "string"}, {"name": "departure", "dtype": "string"}, {"name": "day", "dtype": "string"}, {"name": "book people", "dtype": "string"}, {"name": "leave at", "dtype": "string"}, {"name": "destination", "dtype": "string"}]}, {"name": "attraction", "struct": [{"name": "area", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "type", "dtype": "string"}]}, {"name": "restaurant", "struct": [{"name": "price range", "dtype": "string"}, {"name": "area", "dtype": "string"}, {"name": "food", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "book day", "dtype": "string"}, {"name": "book people", "dtype": "string"}, {"name": "book time", "dtype": "string"}]}, {"name": "hospital", "struct": [{"name": "department", "dtype": "string"}]}, {"name": "taxi", "struct": [{"name": "leave at", "dtype": "string"}, {"name": "destination", "dtype": "string"}, {"name": "departure", "dtype": "string"}, {"name": "arrive by", "dtype": "string"}]}, {"name": "bus", "struct": [{"name": "departure", "dtype": "string"}, {"name": "destination", "dtype": "string"}, {"name": "leave at", "dtype": "string"}, {"name": "day", "dtype": "string"}]}, {"name": "police", "struct": [{"name": "name", "dtype": "string"}]}]}, {"name": "turn_slot_values", "struct": [{"name": "hotel", "struct": [{"name": "price range", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "parking", "dtype": "string"}, {"name": "book day", "dtype": "string"}, {"name": "book people", "dtype": "string"}, {"name": "book stay", "dtype": "string"}, {"name": "stars", "dtype": "string"}, {"name": "internet", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "area", "dtype": "string"}]}, {"name": "train", "struct": [{"name": "arrive by", "dtype": "string"}, {"name": "departure", "dtype": "string"}, {"name": "day", "dtype": "string"}, {"name": "book people", "dtype": "string"}, {"name": "leave at", "dtype": "string"}, {"name": "destination", "dtype": "string"}]}, {"name": "attraction", "struct": [{"name": "area", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "type", "dtype": "string"}]}, {"name": "restaurant", "struct": [{"name": "price range", "dtype": "string"}, {"name": "area", "dtype": "string"}, {"name": "food", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "book day", "dtype": "string"}, {"name": "book people", "dtype": "string"}, {"name": "book time", "dtype": "string"}]}, {"name": "hospital", "struct": [{"name": "department", "dtype": "string"}]}, {"name": "taxi", "struct": [{"name": "leave at", "dtype": "string"}, {"name": "destination", "dtype": "string"}, {"name": "departure", "dtype": "string"}, {"name": "arrive by", "dtype": "string"}]}, {"name": "bus", "struct": [{"name": "departure", 
"dtype": "string"}, {"name": "destination", "dtype": "string"}, {"name": "leave at", "dtype": "string"}, {"name": "day", "dtype": "string"}]}, {"name": "police", "struct": [{"name": "name", "dtype": "string"}]}]}, {"name": "last_slot_values", "struct": [{"name": "hotel", "struct": [{"name": "price range", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "parking", "dtype": "string"}, {"name": "book day", "dtype": "string"}, {"name": "book people", "dtype": "string"}, {"name": "book stay", "dtype": "string"}, {"name": "stars", "dtype": "string"}, {"name": "internet", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "area", "dtype": "string"}]}, {"name": "train", "struct": [{"name": "arrive by", "dtype": "string"}, {"name": "departure", "dtype": "string"}, {"name": "day", "dtype": "string"}, {"name": "book people", "dtype": "string"}, {"name": "leave at", "dtype": "string"}, {"name": "destination", "dtype": "string"}]}, {"name": "attraction", "struct": [{"name": "area", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "type", "dtype": "string"}]}, {"name": "restaurant", "struct": [{"name": "price range", "dtype": "string"}, {"name": "area", "dtype": "string"}, {"name": "food", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "book day", "dtype": "string"}, {"name": "book people", "dtype": "string"}, {"name": "book time", "dtype": "string"}]}, {"name": "hospital", "struct": [{"name": "department", "dtype": "string"}]}, {"name": "taxi", "struct": [{"name": "leave at", "dtype": "string"}, {"name": "destination", "dtype": "string"}, {"name": "departure", "dtype": "string"}, {"name": "arrive by", "dtype": "string"}]}, {"name": "bus", "struct": [{"name": "departure", "dtype": "string"}, {"name": "destination", "dtype": "string"}, {"name": "leave at", "dtype": "string"}, {"name": "day", "dtype": "string"}]}, {"name": "police", "struct": [{"name": "name", "dtype": "string"}]}]}, {"name": "system_response_acts", "sequence": "string"}, {"name": "system_response", "dtype": "string"}], "splits": [{"name": "valid_evaluable_only", "num_bytes": 15490.408443056875, "num_examples": 11}], "download_size": 59035, "dataset_size": 15490.408443056875}}
2023-11-07T07:03:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "PMUL4976_only_dataset" More Information needed
[ "# Dataset Card for \"PMUL4976_only_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"PMUL4976_only_dataset\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"PMUL4976_only_dataset\"\n\nMore Information needed" ]
f023a57bea99a76c4e62edcfe2dfbbb0bfcd4607
# Wikipedia Japanese data (20231030) - Source Date: 2023/10/30 - Source: https://dumps.wikimedia.org/other/cirrussearch/ # License CC BY-SA 4.0 # Example WIP
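Since the Example section above is still WIP, here is a minimal loading sketch; the `datasets` dependency and the access pattern are assumptions, while the config names ("default" article-level, "chunked" passage-level) come from this record's metadata:

```python
# Minimal sketch: load the article-level and chunked configs.
# Assumes the Hugging Face `datasets` library is installed.
from datasets import load_dataset

articles = load_dataset("hotchpotch/wikipedia-ja-20231030", split="train")
print(articles[0]["title"], articles[0]["url"])

# The "chunked" config splits each article into overlapping text chunks
# with character offsets (start/end, overlap_start/overlap_end).
chunks = load_dataset("hotchpotch/wikipedia-ja-20231030", "chunked", split="train")
print(chunks[0]["text"][:100], chunks[0]["start"], chunks[0]["end"])
```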
hotchpotch/wikipedia-ja-20231030
[ "size_categories:1M<n<10M", "language:ja", "license:cc", "region:us" ]
2023-11-07T07:04:51+00:00
{"language": ["ja"], "license": "cc", "size_categories": ["1M<n<10M"], "pretty_name": "Wikipedia Japanese dump data", "dataset_info": [{"config_name": "chunked", "features": [{"name": "id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "row_id", "dtype": "int64"}, {"name": "chunk_row_id", "dtype": "int64"}, {"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "overlap_text", "dtype": "string"}, {"name": "overlap_start", "dtype": "int64"}, {"name": "overlap_end", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 8594421711, "num_examples": 6577416}], "download_size": 4767055138, "dataset_size": 8594421711}, {"config_name": "default", "features": [{"name": "id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "row_id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 6680840005, "num_examples": 1390769}], "download_size": 3779687960, "dataset_size": 6680840005}], "configs": [{"config_name": "chunked", "data_files": [{"split": "train", "path": "chunked/train-*"}]}, {"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-13T11:00:53+00:00
[]
[ "ja" ]
TAGS #size_categories-1M<n<10M #language-Japanese #license-cc #region-us
# Wikipedia Japanese data (20231030) - Source Date: 2023/10/30 - Source: URL # License CC BY-SA 4.0 # Example WIP
[ "# Wikipedia Japanese data (20231030)\n\n- Source Date: 2023/10/30\n- Source: URL", "# License\n\nCC BY-SA 4.0", "# Example\n\nWIP" ]
[ "TAGS\n#size_categories-1M<n<10M #language-Japanese #license-cc #region-us \n", "# Wikipedia Japanese data (20231030)\n\n- Source Date: 2023/10/30\n- Source: URL", "# License\n\nCC BY-SA 4.0", "# Example\n\nWIP" ]
[ 29, 19, 7, 5 ]
[ "passage: TAGS\n#size_categories-1M<n<10M #language-Japanese #license-cc #region-us \n# Wikipedia Japanese data (20231030)\n\n- Source Date: 2023/10/30\n- Source: URL# License\n\nCC BY-SA 4.0# Example\n\nWIP" ]
ec40d770f77a18c83d242f3f33f6c14ea2f91fb2
# Dataset Card for "squad_train500_eval100" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/squad_train500_eval100
[ "region:us" ]
2023-11-07T07:45:05+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}, {"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answers", "sequence": [{"name": "text", "dtype": "string"}, {"name": "answer_start", "dtype": "int32"}]}, {"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "validation", "num_bytes": 138876, "num_examples": 100}, {"name": "train", "num_bytes": 1088215, "num_examples": 500}], "download_size": 206819, "dataset_size": 1227091}}
2023-11-07T08:45:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "squad_train500_eval100" More Information needed
[ "# Dataset Card for \"squad_train500_eval100\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"squad_train500_eval100\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"squad_train500_eval100\"\n\nMore Information needed" ]
ddc5b6c210d59caaf150d81f7aa6bc9e907275b9
# Dataset Card for "xlsum_data-cstnews_results" rouge={'rouge1': 0.15625934233588232, 'rouge2': 0.045078034517833404, 'rougeL': 0.09671713244776929, 'rougeLsum': 0.09671713244776929} Bert={'precision': 0.6181117028517175, 'recall': 0.7212475901364449, 'f1': 0.665386434830855} mover = 0.5427366803250515
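The feature list in the metadata below (reference `summary` vs. generated `gen_summary`) suggests how these scores could be re-derived; a hedged sketch with the `evaluate` library follows (MoverScore lives in a separate package and is omitted, and `lang="pt"` assumes the Portuguese XL-Sum split):

```python
# Minimal sketch: recompute ROUGE and BERTScore over the stored summaries.
# Assumes the `datasets` and `evaluate` libraries; column names are taken
# from this record's feature list.
from datasets import load_dataset
import evaluate

ds = load_dataset("arthurmluz/xlsum_data-cstnews_results", split="validation")
preds, refs = ds["gen_summary"], ds["summary"]

rouge = evaluate.load("rouge")
print(rouge.compute(predictions=preds, references=refs))

bertscore = evaluate.load("bertscore")
scores = bertscore.compute(predictions=preds, references=refs, lang="pt")
print(sum(scores["f1"]) / len(scores["f1"]))  # mean BERTScore F1
```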
arthurmluz/xlsum_data-cstnews_results
[ "region:us" ]
2023-11-07T07:48:15+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 34559131, "num_examples": 7175}], "download_size": 21461885, "dataset_size": 34559131}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-13T20:10:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for "xlsum_data-cstnews_results" rouge={'rouge1': 0.15625934233588232, 'rouge2': 0.045078034517833404, 'rougeL': 0.09671713244776929, 'rougeLsum': 0.09671713244776929} Bert={'precision': 0.6181117028517175, 'recall': 0.7212475901364449, 'f1': 0.665386434830855} mover = 0.5427366803250515
[ "# Dataset Card for \"xlsum_data-cstnews_results\"\n\nrouge={'rouge1': 0.15625934233588232, 'rouge2': 0.045078034517833404, 'rougeL': 0.09671713244776929, 'rougeLsum': 0.09671713244776929}\n\nBert={'precision': 0.6181117028517175, 'recall': 0.7212475901364449, 'f1': 0.665386434830855}\n\nmover = 0.5427366803250515" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"xlsum_data-cstnews_results\"\n\nrouge={'rouge1': 0.15625934233588232, 'rouge2': 0.045078034517833404, 'rougeL': 0.09671713244776929, 'rougeLsum': 0.09671713244776929}\n\nBert={'precision': 0.6181117028517175, 'recall': 0.7212475901364449, 'f1': 0.665386434830855}\n\nmover = 0.5427366803250515" ]
[ 6, 136 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"xlsum_data-cstnews_results\"\n\nrouge={'rouge1': 0.15625934233588232, 'rouge2': 0.045078034517833404, 'rougeL': 0.09671713244776929, 'rougeLsum': 0.09671713244776929}\n\nBert={'precision': 0.6181117028517175, 'recall': 0.7212475901364449, 'f1': 0.665386434830855}\n\nmover = 0.5427366803250515" ]
6f6a514fc8c44309d4c5f17c40d48be8bc1cc491
# Dataset Card for "LLM_DATASET_bbox" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
GHOFRANEE/LLM_DATASET_bbox
[ "region:us" ]
2023-11-07T07:52:03+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "input", "dtype": "string"}, {"name": "output", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1428578, "num_examples": 155}], "download_size": 584470, "dataset_size": 1428578}}
2023-11-07T07:52:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "LLM_DATASET_bbox" More Information needed
[ "# Dataset Card for \"LLM_DATASET_bbox\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"LLM_DATASET_bbox\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"LLM_DATASET_bbox\"\n\nMore Information needed" ]
4fd37e3cdaf96c2ea6bf2ed966ce6314f448593c
# Dataset Card for Nexdata/Dari_Conversational_Speech_Data_by_Telephone

## Description
The 330 Hours - Dari Conversational Speech Data collected by telephone involved 452 native speakers, with a proper balance of gender ratio. Speakers would choose a few familiar topics from the given list and start conversations, ensuring the dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with the text content, the start and end time of each effective sentence, and speaker identification.

For more details, please refer to the link: https://www.nexdata.ai/datasets/1240?source=Huggingface

# Specifications

## Format
8kHz, 8bit, ulaw/alaw pcm, mono channel;

## Recording Environment
quiet indoor environment, without echo;

## Recording content
dozens of topics are specified, and the speakers hold dialogues on those topics while the recording is performed;

## Demographics
452 speakers in total, with 94% male and 6% female;

## Annotation
annotation of the transcription text, speaker identification and gender

## Device
Telephony recording system;

## Language
Dari

## Application scenarios
speech recognition; voiceprint recognition;

## Accuracy rate
the word accuracy rate is not less than 95%

# Licensing Information
Commercial License
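For anyone handling the delivered audio, a minimal reading sketch; the `soundfile` dependency is an assumption (libsndfile handles the u-law WAV subtype), and the file name is a placeholder rather than a real path from the corpus:

```python
# Minimal sketch: read one 8 kHz, 8-bit u-law telephone recording.
# `soundfile` decodes u-law WAV to floating-point samples by default.
import soundfile as sf

audio, sr = sf.read("conversation_0001.wav")  # placeholder file name
assert sr == 8000
print(f"{len(audio) / sr:.1f} s, mono" if audio.ndim == 1 else "unexpected channels")
```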
Nexdata/Dari_Conversational_Speech_Data_by_Telephone
[ "task_categories:conversational", "task_categories:automatic-speech-recognition", "region:us" ]
2023-11-07T08:08:01+00:00
{"task_categories": ["conversational", "automatic-speech-recognition"]}
2023-11-22T09:36:55+00:00
[]
[]
TAGS #task_categories-conversational #task_categories-automatic-speech-recognition #region-us
# Dataset Card for Nexdata/Dari_Conversational_Speech_Data_by_Telephone

## Description
The 330 Hours - Dari Conversational Speech Data collected by telephone involved 452 native speakers, with a proper balance of gender ratio. Speakers would choose a few familiar topics from the given list and start conversations, ensuring the dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with the text content, the start and end time of each effective sentence, and speaker identification.

For more details, please refer to the link: URL

# Specifications

## Format
8kHz, 8bit, ulaw/alaw pcm, mono channel;

## Recording Environment
quiet indoor environment, without echo;

## Recording content
dozens of topics are specified, and the speakers hold dialogues on those topics while the recording is performed;

## Demographics
452 speakers in total, with 94% male and 6% female;

## Annotation
annotation of the transcription text, speaker identification and gender

## Device
Telephony recording system;

## Language
Dari

## Application scenarios
speech recognition; voiceprint recognition;

## Accuracy rate
the word accuracy rate is not less than 95%

# Licensing Information
Commercial License
[ "# Dataset Card for Nexdata/Dari_Conversational_Speech_Data_by_Telephone", "## Description\nThe 330 Hours - Dari Conversational Speech Data collected by telephone involved 452 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n8kHz, 8bit, ulaw/alaw pcm, mono channel;", "## Recording Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n452 speakers totally, with 94% male and 6% female;", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nTelephony recording system;", "## Language\nDari", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 95%", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-conversational #task_categories-automatic-speech-recognition #region-us \n", "# Dataset Card for Nexdata/Dari_Conversational_Speech_Data_by_Telephone", "## Description\nThe 330 Hours - Dari Conversational Speech Data collected by telephone involved 452 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n8kHz, 8bit, ulaw/alaw pcm, mono channel;", "## Recording Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n452 speakers totally, with 94% male and 6% female;", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nTelephony recording system;", "## Language\nDari", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 95%", "# Licensing Information\nCommercial License" ]
[ 32, 26, 141, 3, 20, 16, 30, 18, 17, 8, 3, 11, 16, 9 ]
[ "passage: TAGS\n#task_categories-conversational #task_categories-automatic-speech-recognition #region-us \n# Dataset Card for Nexdata/Dari_Conversational_Speech_Data_by_Telephone## Description\nThe 330 Hours - Dari Conversational Speech Data collected by telephone involved 452 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL# Specifications## Format\n8kHz, 8bit, ulaw/alaw pcm, mono channel;## Recording Environment\nquiet indoor environment, without echo;## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;## Demographics\n452 speakers totally, with 94% male and 6% female;## Annotation\nannotating for the transcription text, speaker identification and gender## Device\nTelephony recording system;## Language\nDari## Application scenarios\nspeech recognition; voiceprint recognition;## Accuracy rate\nthe word accuracy rate is not less than 95%# Licensing Information\nCommercial License" ]
b6e44371b5a3c245250ea2db874cc0602a8fb3df
# Dataset Card for "sanskrit_eng" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mangeshdiyewar/sanskrit_eng
[ "region:us" ]
2023-11-07T08:10:11+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 36409226, "num_examples": 75162}, {"name": "test", "num_bytes": 5652086, "num_examples": 11722}, {"name": "validation", "num_bytes": 3037311, "num_examples": 6149}], "download_size": 22123896, "dataset_size": 45098623}}
2023-11-07T08:12:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sanskrit_eng" More Information needed
[ "# Dataset Card for \"sanskrit_eng\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sanskrit_eng\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"sanskrit_eng\"\n\nMore Information needed" ]
b6033a653f09f4cb0d3e6f6730c32679ea1d1b10
# Dataset Card for "factures_generales" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ayoub999/factures_generales
[ "region:us" ]
2023-11-07T08:14:32+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "image", "dtype": "image"}, {"name": "bboxes", "sequence": {"sequence": "int64"}}, {"name": "ner_tags", "sequence": {"class_label": {"names": {"0": "O", "1": "Ref", "2": "NumFa", "3": "Fourniss", "4": "DateFa", "5": "DateLim", "6": "TotalHT", "7": "TVA", "8": "TotalTTc", "9": "unitP", "10": "Qt", "11": "TVAP", "12": "D\u00e9signation", "13": "Adresse"}}}}, {"name": "tokens", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 1242002.0, "num_examples": 4}, {"name": "test", "num_bytes": 621001.0, "num_examples": 2}], "download_size": 1794949, "dataset_size": 1863003.0}}
2023-11-08T16:48:15+00:00
[]
[]
TAGS #region-us
# Dataset Card for "factures_generales" More Information needed
[ "# Dataset Card for \"factures_generales\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"factures_generales\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"factures_generales\"\n\nMore Information needed" ]
c8c156d77aa5bc6aab25916a0b0eec0e32d1aadf
# Dataset Card for Nexdata/Indonesian_Conversational_Speech_Data_by_Mobile_Phone

## Description
The 300 Hours - Indonesian conversational speech data collected by mobile phone involved about 300 native speakers, with a proper balance of gender ratio. Speakers would choose a few familiar topics from the given list and start conversations, ensuring the dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with the text content, the start and end time of each effective sentence, and speaker identification.

For more details, please refer to the link: https://www.nexdata.ai/datasets/1239?source=Huggingface

# Specifications

## Format
16kHz 16bit, uncompressed wav, mono channel;

## Environment
quiet indoor environment, without echo;

## Recording content
dozens of topics are specified, and the speakers hold dialogues on those topics while the recording is performed;

## Demographics
About 300 speakers in total

## Annotation
annotation of the transcription text, speaker identification and gender

## Device
Android mobile phone, iPhone;

## Application scenarios
speech recognition; voiceprint recognition;

## Accuracy rate
the word accuracy rate is not less than 98%

# Licensing Information
Commercial License
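Because this corpus is plain 16kHz, 16bit PCM WAV, the standard library alone can inspect it; a minimal sketch follows (the file name is a placeholder, not a path from the corpus):

```python
# Minimal sketch: inspect one 16 kHz, 16-bit PCM recording with stdlib `wave`.
import wave

with wave.open("dialogue_0001.wav", "rb") as w:  # placeholder file name
    assert w.getframerate() == 16000 and w.getsampwidth() == 2
    seconds = w.getnframes() / w.getframerate()
    print(f"{seconds:.1f} s, {w.getnchannels()} channel(s)")
```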
Nexdata/Indonesian_Conversational_Speech_Data_by_Mobile_Phone
[ "task_categories:conversational", "task_categories:automatic-speech-recognition", "language:id", "region:us" ]
2023-11-07T08:18:16+00:00
{"language": ["id"], "task_categories": ["conversational", "automatic-speech-recognition"]}
2023-11-22T09:37:55+00:00
[]
[ "id" ]
TAGS #task_categories-conversational #task_categories-automatic-speech-recognition #language-Indonesian #region-us
# Dataset Card for Nexdata/Indonesian_Conversational_Speech_Data_by_Mobile_Phone

## Description
The 300 Hours - Indonesian conversational speech data collected by mobile phone involved about 300 native speakers, with a proper balance of gender ratio. Speakers would choose a few familiar topics from the given list and start conversations, ensuring the dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with the text content, the start and end time of each effective sentence, and speaker identification.

For more details, please refer to the link: URL

# Specifications

## Format
16kHz 16bit, uncompressed wav, mono channel;

## Environment
quiet indoor environment, without echo;

## Recording content
dozens of topics are specified, and the speakers hold dialogues on those topics while the recording is performed;

## Demographics
About 300 speakers in total

## Annotation
annotation of the transcription text, speaker identification and gender

## Device
Android mobile phone, iPhone;

## Application scenarios
speech recognition; voiceprint recognition;

## Accuracy rate
the word accuracy rate is not less than 98%

# Licensing Information
Commercial License
[ "# Dataset Card for Nexdata/Indonesian_Conversational_Speech_Data_by_Mobile_Phone", "## Description\nThe 300 Hours - Indonesian conversational speech data collected by phone involved about 300 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz 16bit, uncompressed wav, mono channel;", "## Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\nAbout 300 speakers totally", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nAndroid mobile phone, iPhone;", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-conversational #task_categories-automatic-speech-recognition #language-Indonesian #region-us \n", "# Dataset Card for Nexdata/Indonesian_Conversational_Speech_Data_by_Mobile_Phone", "## Description\nThe 300 Hours - Indonesian conversational speech data collected by phone involved about 300 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz 16bit, uncompressed wav, mono channel;", "## Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\nAbout 300 speakers totally", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nAndroid mobile phone, iPhone;", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ 37, 27, 145, 3, 18, 14, 30, 9, 17, 8, 11, 16, 9 ]
[ "passage: TAGS\n#task_categories-conversational #task_categories-automatic-speech-recognition #language-Indonesian #region-us \n# Dataset Card for Nexdata/Indonesian_Conversational_Speech_Data_by_Mobile_Phone## Description\nThe 300 Hours - Indonesian conversational speech data collected by phone involved about 300 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL# Specifications## Format\n16kHz 16bit, uncompressed wav, mono channel;## Environment\nquiet indoor environment, without echo;## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;## Demographics\nAbout 300 speakers totally## Annotation\nannotating for the transcription text, speaker identification and gender## Device\nAndroid mobile phone, iPhone;## Application scenarios\nspeech recognition; voiceprint recognition;## Accuracy rate\nthe word accuracy rate is not less than 98%# Licensing Information\nCommercial License" ]
a0da3a6d4837dbf8ae039896e73aec0291beb585
# Dataset Card for "counsel-ko_1024" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
lectura/counsel-ko_1024
[ "region:us" ]
2023-11-07T08:19:08+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}], "splits": [{"name": "train", "num_bytes": 4239400, "num_examples": 1034}], "download_size": 1794292, "dataset_size": 4239400}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-07T08:19:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "counsel-ko_1024" More Information needed
[ "# Dataset Card for \"counsel-ko_1024\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"counsel-ko_1024\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"counsel-ko_1024\"\n\nMore Information needed" ]
b9da26503ecd5f12fda3894bb6b9f0789efe0706
# Dataset Card for Nexdata/Filipino_Conversational_Speech_Data_by_Mobile_Phone

## Description
The 104 Hours - Filipino Conversational Speech Data collected by mobile phone involved 140 native speakers, with a proper balance of gender ratio. Speakers would choose a few familiar topics from the given list and start conversations, ensuring the dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with the text content, the start and end time of each effective sentence, and speaker identification.

For more details, please refer to the link: https://www.nexdata.ai/datasets/1238?source=Huggingface

# Specifications

## Format
16kHz 16bit, uncompressed wav, mono channel;

## Environment
quiet indoor environment, without echo;

## Recording content
dozens of topics are specified, and the speakers hold dialogues on those topics while the recording is performed;

## Demographics
140 speakers in total, with 52% male and 48% female;

## Annotation
annotation of the transcription text, speaker identification and gender

## Device
Android mobile phone, iPhone;

## Language
Filipino;

## Application scenarios
speech recognition; voiceprint recognition;

## Accuracy rate
the word accuracy rate is not less than 98%

# Licensing Information
Commercial License
Nexdata/Filipino_Conversational_Speech_Data_by_Mobile_Phone
[ "task_categories:conversational", "language:tl", "region:us" ]
2023-11-07T08:20:51+00:00
{"language": ["tl"], "task_categories": ["conversational"]}
2023-11-10T07:43:49+00:00
[]
[ "tl" ]
TAGS #task_categories-conversational #language-Tagalog #region-us
# Dataset Card for Nexdata/Filipino_Conversational_Speech_Data_by_Mobile_Phone

## Description
The 104 Hours - Filipino Conversational Speech Data collected by mobile phone involved 140 native speakers, with a proper balance of gender ratio. Speakers would choose a few familiar topics from the given list and start conversations, ensuring the dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with the text content, the start and end time of each effective sentence, and speaker identification.

For more details, please refer to the link: URL

# Specifications

## Format
16kHz 16bit, uncompressed wav, mono channel;

## Environment
quiet indoor environment, without echo;

## Recording content
dozens of topics are specified, and the speakers hold dialogues on those topics while the recording is performed;

## Demographics
140 speakers in total, with 52% male and 48% female;

## Annotation
annotation of the transcription text, speaker identification and gender

## Device
Android mobile phone, iPhone;

## Language
Filipino;

## Application scenarios
speech recognition; voiceprint recognition;

## Accuracy rate
the word accuracy rate is not less than 98%

# Licensing Information
Commercial License
[ "# Dataset Card for Nexdata/Filipino_Conversational_Speech_Data_by_Mobile_Phone", "## Description\nThe 104 Hours - Filipino Conversational Speech Data by Mobile Phone collected by phone involved 140 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz 16bit, uncompressed wav, mono channel;", "## Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n140 speakers totally, with 52% male and 48% female;", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nAndroid mobile phone, iPhone;", "## Language\nFilipino;", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-conversational #language-Tagalog #region-us \n", "# Dataset Card for Nexdata/Filipino_Conversational_Speech_Data_by_Mobile_Phone", "## Description\nThe 104 Hours - Filipino Conversational Speech Data by Mobile Phone collected by phone involved 140 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz 16bit, uncompressed wav, mono channel;", "## Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n140 speakers totally, with 52% male and 48% female;", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nAndroid mobile phone, iPhone;", "## Language\nFilipino;", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ 22, 28, 147, 3, 18, 14, 30, 18, 17, 8, 4, 11, 16, 9 ]
[ "passage: TAGS\n#task_categories-conversational #language-Tagalog #region-us \n# Dataset Card for Nexdata/Filipino_Conversational_Speech_Data_by_Mobile_Phone## Description\nThe 104 Hours - Filipino Conversational Speech Data by Mobile Phone collected by phone involved 140 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL# Specifications## Format\n16kHz 16bit, uncompressed wav, mono channel;## Environment\nquiet indoor environment, without echo;## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;## Demographics\n140 speakers totally, with 52% male and 48% female;## Annotation\nannotating for the transcription text, speaker identification and gender## Device\nAndroid mobile phone, iPhone;## Language\nFilipino;## Application scenarios\nspeech recognition; voiceprint recognition;## Accuracy rate\nthe word accuracy rate is not less than 98%# Licensing Information\nCommercial License" ]
d1e44b66481ebaed5cc59f81ac63b594d76274c4
# Dataset Card for Nexdata/French_Conversational_Speech_Data_by_Telephone

## Description
The 547 Hours - French Conversational Speech Data involved 964 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.

For more details, please refer to the link: https://www.nexdata.ai/datasets/1233?source=Huggingface

# Specifications

## Format
8kHz, 8bit, u-law pcm, mono channel;

## Recording Environment
quiet indoor environment, without echo;

## Recording content
dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;

## Demographics
964 speakers totally, with 41% male and 59% female;

## Annotation
annotating for the transcription text, speaker identification and gender

## Device
Telephony recording system;

## Language
French

## Application scenarios
speech recognition; voiceprint recognition;

## Accuracy rate
the word accuracy rate is not less than 98%

# Licensing Information
Commercial License
Nexdata/French_Conversational_Speech_Data_by_Telephone
[ "task_categories:conversational", "language:fr", "region:us" ]
2023-11-07T08:23:14+00:00
{"language": ["fr"], "task_categories": ["conversational"]}
2024-01-26T08:55:15+00:00
[]
[ "fr" ]
TAGS #task_categories-conversational #language-French #region-us
# Dataset Card for Nexdata/French_Conversational_Speech_Data_by_Telephone

## Description
The 547 Hours - French Conversational Speech Data involved 964 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.

For more details, please refer to the link: URL

# Specifications

## Format
8kHz, 8bit, u-law pcm, mono channel;

## Recording Environment
quiet indoor environment, without echo;

## Recording content
dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;

## Demographics
964 speakers totally, with 41% male and 59% female;

## Annotation
annotating for the transcription text, speaker identification and gender

## Device
Telephony recording system;

## Language
French

## Application scenarios
speech recognition; voiceprint recognition;

## Accuracy rate
the word accuracy rate is not less than 98%

# Licensing Information
Commercial License
[ "# Dataset Card for Nexdata/French_Conversational_Speech_Data_by_Telephone", "## Description\nThe 547 Hours - French Conversational Speech Data involved 964 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n8kHz, 8bit, u-law pcm, mono channel;", "## Recording Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n964 speakers totally, with 41% male and 59% female;", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nTelephony recording system;", "## Language\nFrench", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-conversational #language-French #region-us \n", "# Dataset Card for Nexdata/French_Conversational_Speech_Data_by_Telephone", "## Description\nThe 547 Hours - French Conversational Speech Data involved 964 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n8kHz, 8bit, u-law pcm, mono channel;", "## Recording Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n964 speakers totally, with 41% male and 59% female;", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nTelephony recording system;", "## Language\nFrench", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ 22, 27, 135, 3, 18, 16, 30, 19, 17, 8, 3, 11, 16, 9 ]
[ "passage: TAGS\n#task_categories-conversational #language-French #region-us \n# Dataset Card for Nexdata/French_Conversational_Speech_Data_by_Telephone## Description\nThe 547 Hours - French Conversational Speech Data involved 964 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL# Specifications## Format\n8kHz, 8bit, u-law pcm, mono channel;## Recording Environment\nquiet indoor environment, without echo;## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;## Demographics\n964 speakers totally, with 41% male and 59% female;## Annotation\nannotating for the transcription text, speaker identification and gender## Device\nTelephony recording system;## Language\nFrench## Application scenarios\nspeech recognition; voiceprint recognition;## Accuracy rate\nthe word accuracy rate is not less than 98%# Licensing Information\nCommercial License" ]
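The telephone corpora in this collection are delivered as 8kHz, 8bit u-law PCM. The sketch below shows one way to expand G.711 u-law codes into 16-bit linear PCM with NumPy; it assumes headerless, one-byte-per-sample files and a hypothetical file name. U-law WAV files with proper headers can instead be read directly by libsndfile-based tools such as soundfile.

```python
import numpy as np

def ulaw_to_pcm16(codes: np.ndarray) -> np.ndarray:
    """Expand 8-bit G.711 u-law codes to 16-bit linear PCM samples."""
    u = ~codes.astype(np.uint8)                       # u-law bytes are stored complemented
    exponent = (u >> 4) & 0x07                        # 3-bit segment number
    mantissa = (u & 0x0F).astype(np.int32)            # 4-bit step within the segment
    magnitude = ((mantissa << 3) + 0x84) << exponent  # 0x84 is the decoder bias (132)
    return np.where(u & 0x80, 0x84 - magnitude, magnitude - 0x84).astype(np.int16)

# Hypothetical headerless telephony file: one u-law byte per 8 kHz sample.
codes = np.fromfile("call_0001.ulaw", dtype=np.uint8)
pcm = ulaw_to_pcm16(codes)  # int16 samples in roughly [-32124, 32124]
```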
b6053d94525d7473c47e53b631903d2029446402
# Dataset Card for Nexdata/Italian_Conversational_Speech_Data_by_Telephone

## Description
The 500 Hours - Italian Conversational Speech Data involved more than 700 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.

For more details, please refer to the link: https://www.nexdata.ai/datasets/1232?source=Huggingface

# Specifications

## Format
8kHz, 8bit, u-law pcm, mono channel;

## Recording Environment
quiet indoor environment, without echo;

## Recording content
dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;

## Demographics
about 700 people.

## Annotation
annotating for the transcription text, speaker identification and gender

## Device
Telephony recording system;

## Language
Italian

## Application scenarios
speech recognition; voiceprint recognition;

## Accuracy rate
the word accuracy rate is not less than 98%

# Licensing Information
Commercial License
Nexdata/Italian_Conversational_Speech_Data_by_Telephone
[ "task_categories:conversational", "language:it", "region:us" ]
2023-11-07T08:25:05+00:00
{"language": ["it"], "task_categories": ["conversational"]}
2024-01-26T08:55:11+00:00
[]
[ "it" ]
TAGS #task_categories-conversational #language-Italian #region-us
# Dataset Card for Nexdata/Italian_Conversational_Speech_Data_by_Telephone

## Description
The 500 Hours - Italian Conversational Speech Data involved more than 700 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.

For more details, please refer to the link: URL

# Specifications

## Format
8kHz, 8bit, u-law pcm, mono channel;

## Recording Environment
quiet indoor environment, without echo;

## Recording content
dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;

## Demographics
about 700 people.

## Annotation
annotating for the transcription text, speaker identification and gender

## Device
Telephony recording system;

## Language
Italian

## Application scenarios
speech recognition; voiceprint recognition;

## Accuracy rate
the word accuracy rate is not less than 98%

# Licensing Information
Commercial License
[ "# Dataset Card for Nexdata/Italian_Conversational_Speech_Data_by_Telephone", "## Description\nThe 500 Hours - Italian Conversational Speech Data involved more than 700 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n8kHz, 8bit, u-law pcm, mono channel;", "## Recording Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\nabout 700 people.", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nTelephony recording system;", "## Language\nItalian", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-conversational #language-Italian #region-us \n", "# Dataset Card for Nexdata/Italian_Conversational_Speech_Data_by_Telephone", "## Description\nThe 500 Hours - Italian Conversational Speech Data involved more than 700 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n8kHz, 8bit, u-law pcm, mono channel;", "## Recording Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\nabout 700 people.", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nTelephony recording system;", "## Language\nItalian", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ 21, 26, 135, 3, 18, 16, 30, 8, 17, 8, 3, 11, 16, 9 ]
[ "passage: TAGS\n#task_categories-conversational #language-Italian #region-us \n# Dataset Card for Nexdata/Italian_Conversational_Speech_Data_by_Telephone## Description\nThe 500 Hours - Italian Conversational Speech Data involved more than 700 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL# Specifications## Format\n8kHz, 8bit, u-law pcm, mono channel;## Recording Environment\nquiet indoor environment, without echo;## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;## Demographics\nabout 700 people.## Annotation\nannotating for the transcription text, speaker identification and gender## Device\nTelephony recording system;## Language\nItalian## Application scenarios\nspeech recognition; voiceprint recognition;## Accuracy rate\nthe word accuracy rate is not less than 98%# Licensing Information\nCommercial License" ]
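Several of these cards quote a word accuracy rate (here, not less than 98%). One common way to compute such a figure is 1 - WER, where WER is the word-level edit distance between a reference transcript and a hypothesis, divided by the reference length. A minimal sketch, assuming whitespace tokenization; this is an illustration, not Nexdata's own QA procedure:

```python
def word_accuracy(reference: str, hypothesis: str) -> float:
    """Return 1 - WER; WER = word-level Levenshtein distance / reference length."""
    ref, hyp = reference.split(), hypothesis.split()
    prev = list(range(len(hyp) + 1))          # distances against the empty reference
    for i, r in enumerate(ref, start=1):
        curr = [i] + [0] * len(hyp)
        for j, h in enumerate(hyp, start=1):
            cost = 0 if r == h else 1
            curr[j] = min(prev[j] + 1,        # deletion
                          curr[j - 1] + 1,    # insertion
                          prev[j - 1] + cost) # substitution or match
        prev = curr
    return 1.0 - prev[-1] / max(len(ref), 1)

print(word_accuracy("il gatto dorme sul divano", "il gatto dormiva sul divano"))  # 0.8
```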
a3b9f4f38863578d9aed3607cd752963b9368731
# Dataset Card for Nexdata/Spanish_Conversational_Speech_Data_by_Telephone ## Description The 500 Hours - Spanish Conversational Speech Data involved more than 700 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification. For more details, please refer to the link: https://www.nexdata.ai/datasets/1234?source=Huggingface # Specifications ## Format 8kHz, 8bit, u-law pcm, mono channel; ## Recording Environment quiet indoor environment, without echo; ## Recording content dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed; ## Demographics about 700 people. ## Annotation annotating for the transcription text, speaker identification and gender ## Device Telephony recording system; ## Language Spanish ## Application scenarios speech recognition; voiceprint recognition; ## Accuracy rate the word accuracy rate is not less than 98% # Licensing Information Commercial License
Nexdata/Spanish_Conversational_Speech_Data_by_Telephone
[ "task_categories:conversational", "language:es", "region:us" ]
2023-11-07T08:26:44+00:00
{"language": ["es"], "task_categories": ["conversational"]}
2024-01-26T08:55:09+00:00
[]
[ "es" ]
TAGS #task_categories-conversational #language-Spanish #region-us
# Dataset Card for Nexdata/Spanish_Conversational_Speech_Data_by_Telephone ## Description The 500 Hours - Spanish Conversational Speech Data involved more than 700 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification. For more details, please refer to the link: URL # Specifications ## Format 8kHz, 8bit, u-law pcm, mono channel; ## Recording Environment quiet indoor environment, without echo; ## Recording content dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed; ## Demographics about 700 people. ## Annotation annotating for the transcription text, speaker identification and gender ## Device Telephony recording system; ## Language Spanish ## Application scenarios speech recognition; voiceprint recognition; ## Accuracy rate the word accuracy rate is not less than 98% # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/Spanish_Conversational_Speech_Data_by_Telephone", "## Description\nThe 500 Hours - Spanish Conversational Speech Data involved more than 700 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n8kHz, 8bit, u-law pcm, mono channel;", "## Recording Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\nabout 700 people.", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nTelephony recording system;", "## Language\nSpanish", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-conversational #language-Spanish #region-us \n", "# Dataset Card for Nexdata/Spanish_Conversational_Speech_Data_by_Telephone", "## Description\nThe 500 Hours - Spanish Conversational Speech Data involved more than 700 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n8kHz, 8bit, u-law pcm, mono channel;", "## Recording Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\nabout 700 people.", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nTelephony recording system;", "## Language\nSpanish", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ 21, 26, 135, 3, 18, 16, 30, 8, 17, 8, 3, 11, 16, 9 ]
[ "passage: TAGS\n#task_categories-conversational #language-Spanish #region-us \n# Dataset Card for Nexdata/Spanish_Conversational_Speech_Data_by_Telephone## Description\nThe 500 Hours - Spanish Conversational Speech Data involved more than 700 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL# Specifications## Format\n8kHz, 8bit, u-law pcm, mono channel;## Recording Environment\nquiet indoor environment, without echo;## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;## Demographics\nabout 700 people.## Annotation\nannotating for the transcription text, speaker identification and gender## Device\nTelephony recording system;## Language\nSpanish## Application scenarios\nspeech recognition; voiceprint recognition;## Accuracy rate\nthe word accuracy rate is not less than 98%# Licensing Information\nCommercial License" ]
2de035675b45db120af642b68b2f3ef620488fbb
# Dataset Card for Nexdata/Chinese_Commands_Speech_Data_by_Bluetooth_Headset

## Description
The data were collected from 491 Chinese speakers, each recording the same corpus with 17 commonly used command words. The proportion of male and female speakers is balanced, covering multiple age groups. The data is recorded by Bluetooth headset, covering the mainstream models in the market. It can be used for voice assistants, command control, and other application scenarios.

For more details, please refer to the link: https://www.nexdata.ai/datasets/1222?source=Huggingface

# Specifications

## Format
16kHz, 16bit, uncompressed wav, mono channel

## Recording environment
quiet indoor environment, without echo

## Recording content (read speech)
including: '播放音乐', '开始播放', '暂停音乐', '暂停播放', '停止音乐', '停止播放', '接听电话', '挂断电话', '增大音量', '声音大点', '减小音量', '声音小点', '后退一首', '上一首', '快进一首', '下一首', '收藏音乐' ; a total of 17 Chinese Commands

## Speaker
491 Chinese speakers, balanced for gender.

# Licensing Information
Commercial License
Nexdata/Chinese_Commands_Speech_Data_by_Bluetooth_Headset
[ "task_categories:automatic-speech-recognition", "language:zh", "region:us" ]
2023-11-07T08:28:50+00:00
{"language": ["zh"], "task_categories": ["automatic-speech-recognition"]}
2023-11-10T07:41:23+00:00
[]
[ "zh" ]
TAGS #task_categories-automatic-speech-recognition #language-Chinese #region-us
# Dataset Card for Nexdata/Chinese_Commands_Speech_Data_by_Bluetooth_Headset

## Description
The data were collected from 491 Chinese speakers, each recording the same corpus with 17 commonly used command words. The proportion of male and female speakers is balanced, covering multiple age groups. The data is recorded by Bluetooth headset, covering the mainstream models in the market. It can be used for voice assistants, command control, and other application scenarios.

For more details, please refer to the link: URL

# Specifications

## Format
16kHz, 16bit, uncompressed wav, mono channel

## Recording environment
quiet indoor environment, without echo

## Recording content (read speech)
including: '播放音乐', '开始播放', '暂停音乐', '暂停播放', '停止音乐', '停止播放', '接听电话', '挂断电话', '增大音量', '声音大点', '减小音量', '声音小点', '后退一首', '上一首', '快进一首', '下一首', '收藏音乐' ; a total of 17 Chinese Commands

## Speaker
491 Chinese speakers, balanced for gender.

# Licensing Information
Commercial License
[ "# Dataset Card for Nexdata/Chinese_Commands_Speech_Data_by_Bluetooth_Headset", "## Description\nThe data were collected from 491 Chinese speakers, each recording the same corpus with 17 commonly used command words. The proportion of male and female speakers is balanced, covering multiple age groups. The data is recorded by Bluetooth headset, covering the mainstream models in the market. It can be used for the voice assistant, command control, and other application scenarios.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, uncompressed wav, mono channel", "## Recording environment\nquiet indoor environment, without echo", "## Recording content (read speech)\nincluding: '播放音乐', '开始播放', '暂停音乐', '暂停播放', '停止音乐', '停止播放', '接听电话', '挂断电话', '增大音量', '声音大点', '减小音量', '声音小点', '后退一首', '上一首', '快进一首', '下一首', '收藏音乐' ; a total of 17 Chinese Commands", "## Speaker\n491 Chinese, balance for gender.", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #language-Chinese #region-us \n", "# Dataset Card for Nexdata/Chinese_Commands_Speech_Data_by_Bluetooth_Headset", "## Description\nThe data were collected from 491 Chinese speakers, each recording the same corpus with 17 commonly used command words. The proportion of male and female speakers is balanced, covering multiple age groups. The data is recorded by Bluetooth headset, covering the mainstream models in the market. It can be used for the voice assistant, command control, and other application scenarios.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, uncompressed wav, mono channel", "## Recording environment\nquiet indoor environment, without echo", "## Recording content (read speech)\nincluding: '播放音乐', '开始播放', '暂停音乐', '暂停播放', '停止音乐', '停止播放', '接听电话', '挂断电话', '增大音量', '声音大点', '减小音量', '声音小点', '后退一首', '上一首', '快进一首', '下一首', '收藏音乐' ; a total of 17 Chinese Commands", "## Speaker\n491 Chinese, balance for gender.", "# Licensing Information\nCommercial License" ]
[ 27, 31, 93, 3, 18, 12, 112, 10, 9 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #language-Chinese #region-us \n# Dataset Card for Nexdata/Chinese_Commands_Speech_Data_by_Bluetooth_Headset## Description\nThe data were collected from 491 Chinese speakers, each recording the same corpus with 17 commonly used command words. The proportion of male and female speakers is balanced, covering multiple age groups. The data is recorded by Bluetooth headset, covering the mainstream models in the market. It can be used for the voice assistant, command control, and other application scenarios.\n\nFor more details, please refer to the link: URL# Specifications## Format\n16kHz, 16bit, uncompressed wav, mono channel## Recording environment\nquiet indoor environment, without echo## Recording content (read speech)\nincluding: '播放音乐', '开始播放', '暂停音乐', '暂停播放', '停止音乐', '停止播放', '接听电话', '挂断电话', '增大音量', '声音大点', '减小音量', '声音小点', '后退一首', '上一首', '快进一首', '下一首', '收藏音乐' ; a total of 17 Chinese Commands## Speaker\n491 Chinese, balance for gender.# Licensing Information\nCommercial License" ]
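The card above fixes a closed grammar of 17 command phrases, several of which are synonyms (for example, '暂停音乐' and '暂停播放' both mean pause). In a command-control application, a recognizer's transcript can be mapped to an action by simple lookup. The 17 phrases below come from the card; the intent labels are hypothetical:

```python
# The 17 command phrases from the card, mapped to hypothetical intent labels.
COMMANDS = {
    "播放音乐": "play",           "开始播放": "play",
    "暂停音乐": "pause",          "暂停播放": "pause",
    "停止音乐": "stop",           "停止播放": "stop",
    "接听电话": "answer_call",    "挂断电话": "hang_up",
    "增大音量": "volume_up",      "声音大点": "volume_up",
    "减小音量": "volume_down",    "声音小点": "volume_down",
    "后退一首": "previous_track", "上一首": "previous_track",
    "快进一首": "next_track",     "下一首": "next_track",
    "收藏音乐": "favorite",
}

def to_intent(transcript: str) -> str | None:
    """Exact-match lookup; returns None for out-of-grammar input."""
    return COMMANDS.get(transcript.strip())

assert to_intent("暂停播放") == "pause"
```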
fe5d87248f0e265f69ac87919130ef5d9c67b6c0
# Dataset Card for Nexdata/Thai_Conversational_Speech_Data_by_Telephone

## Description
The 1,077 Hours - Thai Conversational Speech Data involved 1,986 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.

For more details, please refer to the link: https://www.nexdata.ai/datasets/1210?source=Huggingface

# Specifications

## Format
8kHz, 8bit, mono channel;

## Recording Environment
quiet indoor environment, without echo;

## Recording content
dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;

## Demographics
1,986 speakers totally, with 41% male and 59% female;

## Annotation
annotating for the transcription text, speaker identification and gender

## Device
Telephony recording system;

## Language
Thai

## Application scenarios
speech recognition; voiceprint recognition;

## Accuracy rate
the word accuracy rate is not less than 95%

# Licensing Information
Commercial License
Nexdata/Thai_Conversational_Speech_Data_by_Telephone
[ "task_categories:conversational", "language:th", "region:us" ]
2023-11-07T08:31:25+00:00
{"language": ["th"], "task_categories": ["conversational"]}
2023-11-10T07:42:40+00:00
[]
[ "th" ]
TAGS #task_categories-conversational #language-Thai #region-us
# Dataset Card for Nexdata/Thai_Conversational_Speech_Data_by_Telephone

## Description
The 1,077 Hours - Thai Conversational Speech Data involved 1,986 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.

For more details, please refer to the link: URL

# Specifications

## Format
8kHz, 8bit, mono channel;

## Recording Environment
quiet indoor environment, without echo;

## Recording content
dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;

## Demographics
1,986 speakers totally, with 41% male and 59% female;

## Annotation
annotating for the transcription text, speaker identification and gender

## Device
Telephony recording system;

## Language
Thai

## Application scenarios
speech recognition; voiceprint recognition;

## Accuracy rate
the word accuracy rate is not less than 95%

# Licensing Information
Commercial License
[ "# Dataset Card for Nexdata/Thai_Conversational_Speech_Data_by_Telephone", "## Description\nThe 1,077 Hours - Thai Conversational Speech Data involved 1,986 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n8kHz, 8bit, mono channel;", "## Recording Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n1,986 speakers totally, with 41% male and 59% female;", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nTelephony recording system;", "## Language\nThai", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 95%", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-conversational #language-Thai #region-us \n", "# Dataset Card for Nexdata/Thai_Conversational_Speech_Data_by_Telephone", "## Description\nThe 1,077 Hours - Thai Conversational Speech Data involved 1,986 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n8kHz, 8bit, mono channel;", "## Recording Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n1,986 speakers totally, with 41% male and 59% female;", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nTelephony recording system;", "## Language\nThai", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 95%", "# Licensing Information\nCommercial License" ]
[ 21, 27, 135, 3, 12, 16, 30, 19, 17, 8, 3, 11, 16, 9 ]
[ "passage: TAGS\n#task_categories-conversational #language-Thai #region-us \n# Dataset Card for Nexdata/Thai_Conversational_Speech_Data_by_Telephone## Description\nThe 1,077 Hours - Thai Conversational Speech Data involved 1,986 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL# Specifications## Format\n8kHz, 8bit, mono channel;## Recording Environment\nquiet indoor environment, without echo;## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;## Demographics\n1,986 speakers totally, with 41% male and 59% female;## Annotation\nannotating for the transcription text, speaker identification and gender## Device\nTelephony recording system;## Language\nThai## Application scenarios\nspeech recognition; voiceprint recognition;## Accuracy rate\nthe word accuracy rate is not less than 95%# Licensing Information\nCommercial License" ]
39366837f9cd59d1321bd5750196134ccd226dd1
# Dataset Card for Nexdata/Brazilian_Portuguese_Conversational_Speech_Data_by_Mobile_Phone

## Description
The 127 Hours - Brazilian Portuguese Conversational Speech Data involved 142 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.

For more details, please refer to the link: https://www.nexdata.ai/datasets/1209?source=Huggingface

# Specifications

## Format
16kHz, 16bit, uncompressed wav, mono channel;

## Recording Environment
quiet indoor environment, without echo;

## Recording content
dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;

## Demographics
142 speakers totally, with 50% males and 50% females.

## Annotation
annotating for the transcription text, speaker identification, gender and noise symbols;

## Device
Android mobile phone, iPhone;

## Application scenarios
speech recognition; voiceprint recognition;

## Accuracy rate
the word accuracy rate is not less than 98%

# Licensing Information
Commercial License
Nexdata/Brazilian_Portuguese_Conversational_Speech_Data_by_Mobile_Phone
[ "task_categories:conversational", "language:pt", "region:us" ]
2023-11-07T08:33:00+00:00
{"language": ["pt"], "task_categories": ["conversational"]}
2023-11-10T07:39:17+00:00
[]
[ "pt" ]
TAGS #task_categories-conversational #language-Portuguese #region-us
# Dataset Card for Nexdata/Brazilian_Portuguese_Conversational_Speech_Data_by_Mobile_Phone

## Description
The 127 Hours - Brazilian Portuguese Conversational Speech Data involved 142 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.

For more details, please refer to the link: URL

# Specifications

## Format
16kHz, 16bit, uncompressed wav, mono channel;

## Recording Environment
quiet indoor environment, without echo;

## Recording content
dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;

## Demographics
142 speakers totally, with 50% males and 50% females.

## Annotation
annotating for the transcription text, speaker identification, gender and noise symbols;

## Device
Android mobile phone, iPhone;

## Application scenarios
speech recognition; voiceprint recognition;

## Accuracy rate
the word accuracy rate is not less than 98%

# Licensing Information
Commercial License
[ "# Dataset Card for Nexdata/Brazilian_Portuguese_Conversational_Speech_Data_by_Mobile_Phone", "## Description\nThe 127 Hours - Brazilian Portuguese Conversational Speech Data involved 142 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, uncompressed wav, mono channel;", "## Recording Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n142 speakers totally, with 50% males and 50% females.", "## Annotation\nannotating for the transcription text, speaker identification gender and noise symbols;", "## Device\nAndroid mobile phone, iPhone;", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-conversational #language-Portuguese #region-us \n", "# Dataset Card for Nexdata/Brazilian_Portuguese_Conversational_Speech_Data_by_Mobile_Phone", "## Description\nThe 127 Hours - Brazilian Portuguese Conversational Speech Data involved 142 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, uncompressed wav, mono channel;", "## Recording Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n142 speakers totally, with 50% males and 50% females.", "## Annotation\nannotating for the transcription text, speaker identification gender and noise symbols;", "## Device\nAndroid mobile phone, iPhone;", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ 22, 32, 144, 3, 19, 16, 30, 18, 22, 8, 11, 16, 9 ]
[ "passage: TAGS\n#task_categories-conversational #language-Portuguese #region-us \n# Dataset Card for Nexdata/Brazilian_Portuguese_Conversational_Speech_Data_by_Mobile_Phone## Description\nThe 127 Hours - Brazilian Portuguese Conversational Speech Data involved 142 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL# Specifications## Format\n16kHz, 16bit, uncompressed wav, mono channel;## Recording Environment\nquiet indoor environment, without echo;## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;## Demographics\n142 speakers totally, with 50% males and 50% females.## Annotation\nannotating for the transcription text, speaker identification gender and noise symbols;## Device\nAndroid mobile phone, iPhone;## Application scenarios\nspeech recognition; voiceprint recognition;## Accuracy rate\nthe word accuracy rate is not less than 98%# Licensing Information\nCommercial License" ]
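For the mobile-phone conversational corpora, every effective sentence is annotated with start and end times plus a speaker label, so individual utterances can be cut straight out of the 16kHz, 16bit mono WAV files. A minimal sketch; the file name and the shape of the annotation record are assumptions, not the corpus's actual schema:

```python
import wave
import numpy as np

def cut_sentence(wav_path: str, start_s: float, end_s: float) -> np.ndarray:
    """Return one annotated sentence as int16 samples from a 16 kHz, 16-bit mono WAV."""
    with wave.open(wav_path, "rb") as w:
        sr = w.getframerate()                  # expected to be 16000 for these corpora
        w.setpos(int(start_s * sr))            # seek to the sentence start (in frames)
        n_frames = int((end_s - start_s) * sr)
        raw = w.readframes(n_frames)
    return np.frombuffer(raw, dtype=np.int16)

# Hypothetical annotation record: (start, end, speaker, transcript).
start, end, speaker, text = 3.25, 7.80, "spk01", "..."
samples = cut_sentence("dialogue_0001.wav", start, end)
```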
a86b1d496ac8773079df364f6f11e5cd8a9f7481
# Dataset Card for Nexdata/Russian_Conversational_Speech_Data_by_Mobile_Phone

## Description
The 107 Hours - Russian Conversational Speech Data involved 134 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.

For more details, please refer to the link: https://www.nexdata.ai/datasets/1208?source=Huggingface

# Specifications

## Format
16kHz, 16bit, uncompressed wav, mono channel;

## Recording Environment
quiet indoor environment, without echo;

## Recording content
dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;

## Demographics
134 speakers totally, with 47% males and 53% females;

## Annotation
annotating for the transcription text, speaker identification, gender and noise symbols

## Device
Android mobile phone, iPhone;

## Language
Russian

## Application scenarios
speech recognition; voiceprint recognition;

## Accuracy rate
the word accuracy rate is not less than 98%

# Licensing Information
Commercial License
Nexdata/Russian_Conversational_Speech_Data_by_Mobile_Phone
[ "task_categories:conversational", "task_categories:automatic-speech-recognition", "language:ru", "region:us" ]
2023-11-07T08:34:48+00:00
{"language": ["ru"], "task_categories": ["conversational", "automatic-speech-recognition"]}
2024-01-26T08:54:56+00:00
[]
[ "ru" ]
TAGS #task_categories-conversational #task_categories-automatic-speech-recognition #language-Russian #region-us
# Dataset Card for Nexdata/Russian_Conversational_Speech_Data_by_Mobile_Phone

## Description
The 107 Hours - Russian Conversational Speech Data involved 134 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.

For more details, please refer to the link: URL

# Specifications

## Format
16kHz, 16bit, uncompressed wav, mono channel;

## Recording Environment
quiet indoor environment, without echo;

## Recording content
dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;

## Demographics
134 speakers totally, with 47% males and 53% females;

## Annotation
annotating for the transcription text, speaker identification, gender and noise symbols

## Device
Android mobile phone, iPhone;

## Language
Russian

## Application scenarios
speech recognition; voiceprint recognition;

## Accuracy rate
the word accuracy rate is not less than 98%

# Licensing Information
Commercial License
[ "# Dataset Card for Nexdata/Russian_Conversational_Speech_Data_by_Mobile_Phone", "## Description\nThe 107 Hours - Russian Conversational Speech Data involved 134 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, uncompressed wav, mono channel;", "## Recording Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n134 speakers totally, with 47% males and 53% females;", "## Annotation\nannotating for the transcription text, speaker identification, gender and noise symbols", "## Device\nAndroid mobile phone, iPhone;", "## Language\nRussian", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-conversational #task_categories-automatic-speech-recognition #language-Russian #region-us \n", "# Dataset Card for Nexdata/Russian_Conversational_Speech_Data_by_Mobile_Phone", "## Description\nThe 107 Hours - Russian Conversational Speech Data involved 134 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, uncompressed wav, mono channel;", "## Recording Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n134 speakers totally, with 47% males and 53% females;", "## Annotation\nannotating for the transcription text, speaker identification, gender and noise symbols", "## Device\nAndroid mobile phone, iPhone;", "## Language\nRussian", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ 37, 27, 140, 3, 19, 16, 30, 20, 22, 8, 3, 11, 16, 9 ]
[ "passage: TAGS\n#task_categories-conversational #task_categories-automatic-speech-recognition #language-Russian #region-us \n# Dataset Card for Nexdata/Russian_Conversational_Speech_Data_by_Mobile_Phone## Description\nThe 107 Hours - Russian Conversational Speech Data involved 134 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL# Specifications## Format\n16kHz, 16bit, uncompressed wav, mono channel;## Recording Environment\nquiet indoor environment, without echo;## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;## Demographics\n134 speakers totally, with 47% males and 53% females;## Annotation\nannotating for the transcription text, speaker identification, gender and noise symbols## Device\nAndroid mobile phone, iPhone;## Language\nRussian## Application scenarios\nspeech recognition; voiceprint recognition;## Accuracy rate\nthe word accuracy rate is not less than 98%# Licensing Information\nCommercial License" ]
b5ff5bbea3e6a41920c1993fcfdc3047f5b311b3
# Dataset Card for Nexdata/Burmese_Conversational_Speech_Data_by_Mobile_Phone

## Description
The 120 Hours - Burmese Conversational Speech Data involved more than 130 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.

For more details, please refer to the link: https://www.nexdata.ai/datasets/1207?source=Huggingface

# Specifications

## Format
16kHz 16bit, uncompressed wav, mono channel;

## Environment
quiet indoor environment, without echo;

## Recording content
dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;

## Demographics
134 speakers totally, with 50% male and 50% female

## Annotation
annotating for the transcription text, speaker identification and gender

## Device
Android mobile phone, iPhone;

## Language
Burmese;

## Application scenarios
speech recognition; voiceprint recognition;

## Accuracy rate
the word accuracy rate is not less than 97%

# Licensing Information
Commercial License
Nexdata/Burmese_Conversational_Speech_Data_by_Mobile_Phone
[ "task_categories:conversational", "language:my", "region:us" ]
2023-11-07T08:37:04+00:00
{"language": ["my"], "task_categories": ["conversational"]}
2023-11-10T07:40:23+00:00
[]
[ "my" ]
TAGS #task_categories-conversational #language-Burmese #region-us
# Dataset Card for Nexdata/Burmese_Conversational_Speech_Data_by_Mobile_Phone

## Description
The 120 Hours - Burmese Conversational Speech Data involved more than 130 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.

For more details, please refer to the link: URL

# Specifications

## Format
16kHz 16bit, uncompressed wav, mono channel;

## Environment
quiet indoor environment, without echo;

## Recording content
dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;

## Demographics
134 speakers totally, with 50% male and 50% female

## Annotation
annotating for the transcription text, speaker identification and gender

## Device
Android mobile phone, iPhone;

## Language
Burmese;

## Application scenarios
speech recognition; voiceprint recognition;

## Accuracy rate
the word accuracy rate is not less than 97%

# Licensing Information
Commercial License
[ "# Dataset Card for Nexdata/Burmese_Conversational_Speech_Data_by_Mobile_Phone", "## Description\nThe 120 Hours - Burmese Conversational Speech Data involved more than 130 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz 16bit, uncompressed wav, mono channel;", "## Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n134 speakers totally, with 50% male and 50% female", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nAndroid mobile phone, iPhone;", "## Language\nBurmese;", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 97%", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-conversational #language-Burmese #region-us \n", "# Dataset Card for Nexdata/Burmese_Conversational_Speech_Data_by_Mobile_Phone", "## Description\nThe 120 Hours - Burmese Conversational Speech Data involved more than 130 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz 16bit, uncompressed wav, mono channel;", "## Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n134 speakers totally, with 50% male and 50% female", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nAndroid mobile phone, iPhone;", "## Language\nBurmese;", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 97%", "# Licensing Information\nCommercial License" ]
[ 20, 26, 143, 3, 18, 14, 30, 15, 17, 8, 5, 11, 17, 9 ]
[ "passage: TAGS\n#task_categories-conversational #language-Burmese #region-us \n# Dataset Card for Nexdata/Burmese_Conversational_Speech_Data_by_Mobile_Phone## Description\nThe 120 Hours - Burmese Conversational Speech Data involved more than 130 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL# Specifications## Format\n16kHz 16bit, uncompressed wav, mono channel;## Environment\nquiet indoor environment, without echo;## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;## Demographics\n134 speakers totally, with 50% male and 50% female## Annotation\nannotating for the transcription text, speaker identification and gender## Device\nAndroid mobile phone, iPhone;## Language\nBurmese;## Application scenarios\nspeech recognition; voiceprint recognition;## Accuracy rate\nthe word accuracy rate is not less than 97%# Licensing Information\nCommercial License" ]
e71dffbb1054f6af094181906b712f87cfe99779
# Dataset Card for Nexdata/Indian_English_Spontaneous_Speech_Data ## Description The 501 Hours - Indian English Spontaneous Speech Data, with content covering multiple topics. All the speech audio was manually transcribed into text; speaker identity, gender, and other attributes are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research. For more details, please refer to the link: https://www.nexdata.ai/datasets/1175?source=Huggingface # Specifications ## Format 16kHz, 16bit, mono channel; ## Content category including interview, speech, live, etc. ## Language Indian English; ## Annotation annotation for the transcription text, speaker identification, gender; ## Application scenarios speech recognition, video caption generation and video content review; ## Accuracy at a Sentence Accuracy Rate (SAR) of no less than 95%. # Licensing Information Commercial License
Nexdata/Indian_English_Spontaneous_Speech_Data
[ "task_categories:automatic-speech-recognition", "language:en", "region:us" ]
2023-11-07T08:42:01+00:00
{"language": ["en"], "task_categories": ["automatic-speech-recognition"]}
2023-11-10T07:37:03+00:00
[]
[ "en" ]
TAGS #task_categories-automatic-speech-recognition #language-English #region-us
# Dataset Card for Nexdata/Indian_English_Spontaneous_Speech_Data ## Description The 501 Hours - Indian English Spontaneous Speech Data, with content covering multiple topics. All the speech audio was manually transcribed into text; speaker identity, gender, and other attributes are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research. For more details, please refer to the link: URL # Specifications ## Format 16kHz, 16bit, mono channel; ## Content category including interview, speech, live, etc. ## Language Indian English; ## Annotation annotation for the transcription text, speaker identification, gender; ## Application scenarios speech recognition, video caption generation and video content review; ## Accuracy at a Sentence Accuracy Rate (SAR) of no less than 95%. # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/Indian_English_Spontaneous_Speech_Data", "## Description\nThe 501 Hours - Indian English Spontaneous Speech Data, the content covering multiple topics. All the speech audio was manually transcribed into text; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, mono channel;", "## Content category\nincluding interview, speech, live, etc.", "## Language\nIndian English;", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Application scenarios\nspeech recognition, video caption generation and video content review;", "## Accuracy\nat a Sentence Accuracy Rate (SAR) of being no less than 95%.", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #language-English #region-us \n", "# Dataset Card for Nexdata/Indian_English_Spontaneous_Speech_Data", "## Description\nThe 501 Hours - Indian English Spontaneous Speech Data, the content covering multiple topics. All the speech audio was manually transcribed into text; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, mono channel;", "## Content category\nincluding interview, speech, live, etc.", "## Language\nIndian English;", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Application scenarios\nspeech recognition, video caption generation and video content review;", "## Accuracy\nat a Sentence Accuracy Rate (SAR) of being no less than 95%.", "# Licensing Information\nCommercial License" ]
[ 26, 24, 87, 3, 12, 12, 5, 17, 15, 22, 9 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #language-English #region-us \n# Dataset Card for Nexdata/Indian_English_Spontaneous_Speech_Data## Description\nThe 501 Hours - Indian English Spontaneous Speech Data, the content covering multiple topics. All the speech audio was manually transcribed into text; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL# Specifications## Format\n16kHz, 16bit, mono channel;## Content category\nincluding interview, speech, live, etc.## Language\nIndian English;## Annotation\nannotation for the transcription text, speaker identification, gender;## Application scenarios\nspeech recognition, video caption generation and video content review;## Accuracy\nat a Sentence Accuracy Rate (SAR) of being no less than 95%.# Licensing Information\nCommercial License" ]
80c791772bb9a7037b2b55fc23429df1861ab824
# Dataset Card for Nexdata/Swedish_Spontaneous_Speech_Data ## Description The 225 Hours - Swedish Spontaneous Speech Data, with content covering multiple topics. All the speech audio was manually transcribed into text; speaker identity, gender, and other attributes are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research. For more details, please refer to the link: https://www.nexdata.ai/datasets/1249?source=Huggingface # Specifications ## Format 16kHz, 16bit, mono channel; ## Content category including self-media, interview, etc. ## Language Swedish ## Annotation annotation for the transcription text, speaker identification, gender; ## Application scenarios speech recognition, video caption generation and video content review; ## Accuracy at a Word Accuracy Rate (WAR) of no less than 95%. # Licensing Information Commercial License
Nexdata/Swedish_Spontaneous_Speech_Data
[ "task_categories:automatic-speech-recognition", "language:sv", "region:us" ]
2023-11-07T08:45:28+00:00
{"language": ["sv"], "task_categories": ["automatic-speech-recognition"]}
2023-11-10T07:38:51+00:00
[]
[ "sv" ]
TAGS #task_categories-automatic-speech-recognition #language-Swedish #region-us
# Dataset Card for Nexdata/Swedish_Spontaneous_Speech_Data ## Description The 225 Hours - Swedish Spontaneous Speech Data, with content covering multiple topics. All the speech audio was manually transcribed into text; speaker identity, gender, and other attributes are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research. For more details, please refer to the link: URL # Specifications ## Format 16kHz, 16bit, mono channel; ## Content category including self-media, interview, etc. ## Language Swedish ## Annotation annotation for the transcription text, speaker identification, gender; ## Application scenarios speech recognition, video caption generation and video content review; ## Accuracy at a Word Accuracy Rate (WAR) of no less than 95%. # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/Swedish_Spontaneous_Speech_Data", "## Description\nThe 225 Hours - Swedish Spontaneous Speech Data, the content covering multiple topics. All the speech audio was manually transcribed into text; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, mono channel;", "## Content category\nincluding self-meida,interview, etc.", "## Language\nSwedish", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Application scenarios\nspeech recognitio, video caption generation and video content review;", "## Accuracy\nat a Word Accuracy Rate (WAR) of being no less than 95%.", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #language-Swedish #region-us \n", "# Dataset Card for Nexdata/Swedish_Spontaneous_Speech_Data", "## Description\nThe 225 Hours - Swedish Spontaneous Speech Data, the content covering multiple topics. All the speech audio was manually transcribed into text; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, mono channel;", "## Content category\nincluding self-meida,interview, etc.", "## Language\nSwedish", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Application scenarios\nspeech recognitio, video caption generation and video content review;", "## Accuracy\nat a Word Accuracy Rate (WAR) of being no less than 95%.", "# Licensing Information\nCommercial License" ]
[ 28, 23, 85, 3, 12, 14, 3, 17, 17, 21, 9 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #language-Swedish #region-us \n# Dataset Card for Nexdata/Swedish_Spontaneous_Speech_Data## Description\nThe 225 Hours - Swedish Spontaneous Speech Data, the content covering multiple topics. All the speech audio was manually transcribed into text; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL# Specifications## Format\n16kHz, 16bit, mono channel;## Content category\nincluding self-meida,interview, etc.## Language\nSwedish## Annotation\nannotation for the transcription text, speaker identification, gender;## Application scenarios\nspeech recognitio, video caption generation and video content review;## Accuracy\nat a Word Accuracy Rate (WAR) of being no less than 95%.# Licensing Information\nCommercial License" ]
9116a0c8d9240e30f8ed1eaed5a1603f8a403bdc
# Dataset Card for Nexdata/Latin_American_Spanish_Children_Spontaneous_Speech_Data ## Description The 189 Hours - Latin American Spanish Child's Spontaneous Speech Data is a collection of speech clips, with content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attributes are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research. For more details, please refer to the link: https://www.nexdata.ai/datasets/1250?source=Huggingface # Specifications ## Format 16kHz, 16bit, mono channel; ## Age children aged 12 and under ## Content category including interview, self-media, variety show, etc. ## Language Latin American Spanish; ## Annotation annotation for the transcription text, speaker identification, gender; ## Application scenarios speech recognition, video caption generation and video content review; ## Accuracy at a Word Accuracy Rate (WAR) of no less than 98%. # Licensing Information Commercial License
Nexdata/Latin_American_Spanish_Children_Spontaneous_Speech_Data
[ "task_categories:automatic-speech-recognition", "language:es", "region:us" ]
2023-11-07T08:48:39+00:00
{"language": ["es"], "task_categories": ["automatic-speech-recognition"]}
2023-11-10T07:36:43+00:00
[]
[ "es" ]
TAGS #task_categories-automatic-speech-recognition #language-Spanish #region-us
# Dataset Card for Nexdata/Latin_American_Spanish_Children_Spontaneous_Speech_Data ## Description The 189 Hours - Latin American Spanish Child's Spontaneous Speech Data is a collection of speech clips, with content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attributes are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research. For more details, please refer to the link: URL # Specifications ## Format 16kHz, 16bit, mono channel; ## Age children aged 12 and under ## Content category including interview, self-media, variety show, etc. ## Language Latin American Spanish; ## Annotation annotation for the transcription text, speaker identification, gender; ## Application scenarios speech recognition, video caption generation and video content review; ## Accuracy at a Word Accuracy Rate (WAR) of no less than 98%. # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/Latin_American_Spanish_Children_Spontaneous_Speech_Data", "## Description\nThe 189 Hours - Latin American Spanish Child's Spontaneous Speech Data is a collection of speech clips, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, mono channel;", "## age\nchildren aged 12 and under", "## Content category\nincluding interview, self-meida,variety show, etc.", "## Language\nLatin American Spanish;", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Application scenarios\nspeech recognition, video caption generation and video content review;", "## Accuracy\nat a Word Accuracy Rate (SAR) of being no less than 98%.", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #language-Spanish #region-us \n", "# Dataset Card for Nexdata/Latin_American_Spanish_Children_Spontaneous_Speech_Data", "## Description\nThe 189 Hours - Latin American Spanish Child's Spontaneous Speech Data is a collection of speech clips, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, mono channel;", "## age\nchildren aged 12 and under", "## Content category\nincluding interview, self-meida,variety show, etc.", "## Language\nLatin American Spanish;", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Application scenarios\nspeech recognition, video caption generation and video content review;", "## Accuracy\nat a Word Accuracy Rate (SAR) of being no less than 98%.", "# Licensing Information\nCommercial License" ]
[ 27, 31, 98, 3, 12, 8, 17, 6, 17, 15, 21, 9 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #language-Spanish #region-us \n# Dataset Card for Nexdata/Latin_American_Spanish_Children_Spontaneous_Speech_Data## Description\nThe 189 Hours - Latin American Spanish Child's Spontaneous Speech Data is a collection of speech clips, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL# Specifications## Format\n16kHz, 16bit, mono channel;## age\nchildren aged 12 and under## Content category\nincluding interview, self-meida,variety show, etc.## Language\nLatin American Spanish;## Annotation\nannotation for the transcription text, speaker identification, gender;## Application scenarios\nspeech recognition, video caption generation and video content review;## Accuracy\nat a Word Accuracy Rate (SAR) of being no less than 98%.# Licensing Information\nCommercial License" ]
ea80c1fa1e2bd9134cd5479e69b6bb93126f0f3d
# Dataset Card for Nexdata/British_English_Spontaneous_Speech_Data ## Description 1,013 Hours – British English Spontaneous Speech Data, with content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attributes are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research. For more details, please refer to the link: https://www.nexdata.ai/datasets/1262?source=Huggingface # Specifications ## Format 16kHz, 16bit, mono channel; ## Content category including conversation, self-media, etc. ## Language British English; ## Annotation annotation for the transcription text, speaker identification, gender; ## Application scenarios speech recognition, video caption generation and video content review; ## Accuracy at a Sentence Accuracy Rate (SAR) of no less than 95%. # Licensing Information Commercial License
Nexdata/British_English_Spontaneous_Speech_Data
[ "task_categories:automatic-speech-recognition", "language:en", "region:us" ]
2023-11-07T08:50:19+00:00
{"language": ["en"], "task_categories": ["automatic-speech-recognition"]}
2023-11-10T07:36:53+00:00
[]
[ "en" ]
TAGS #task_categories-automatic-speech-recognition #language-English #region-us
# Dataset Card for Nexdata/British_English_Spontaneous_Speech_Data ## Description 1,013 Hours – British English Spontaneous Speech Data, with content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attributes are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research. For more details, please refer to the link: URL # Specifications ## Format 16kHz, 16bit, mono channel; ## Content category including conversation, self-media, etc. ## Language British English; ## Annotation annotation for the transcription text, speaker identification, gender; ## Application scenarios speech recognition, video caption generation and video content review; ## Accuracy at a Sentence Accuracy Rate (SAR) of no less than 95%. # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/British_English_Spontaneous_Speech_Data", "## Description\n1,013 Hours – British English Spontaneous Speech Data, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, mono channel;", "## Content category\nincluding conversation, self-media, etc.", "## Language\nBritish English;", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Application scenarios\nspeech recognition, video caption generation and video content review;", "## Accuracy\nat a Sentence Accuracy Rate (SAR) of being no less than 95%.", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #language-English #region-us \n", "# Dataset Card for Nexdata/British_English_Spontaneous_Speech_Data", "## Description\n1,013 Hours – British English Spontaneous Speech Data, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, mono channel;", "## Content category\nincluding conversation, self-media, etc.", "## Language\nBritish English;", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Application scenarios\nspeech recognition, video caption generation and video content review;", "## Accuracy\nat a Sentence Accuracy Rate (SAR) of being no less than 95%.", "# Licensing Information\nCommercial License" ]
[ 26, 24, 87, 3, 12, 12, 5, 17, 15, 22, 9 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #language-English #region-us \n# Dataset Card for Nexdata/British_English_Spontaneous_Speech_Data## Description\n1,013 Hours – British English Spontaneous Speech Data, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL# Specifications## Format\n16kHz, 16bit, mono channel;## Content category\nincluding conversation, self-media, etc.## Language\nBritish English;## Annotation\nannotation for the transcription text, speaker identification, gender;## Application scenarios\nspeech recognition, video caption generation and video content review;## Accuracy\nat a Sentence Accuracy Rate (SAR) of being no less than 95%.# Licensing Information\nCommercial License" ]
2e8666631e27a87bb787772158da3eab8e351978
# Dataset Card for Nexdata/Hindi_Spontaneous_Speech_Data ## Description 494 Hours - Hindi Spontaneous Speech Data, with content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attributes are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research. For more details, please refer to the link: https://www.nexdata.ai/datasets/1269?source=Huggingface # Specifications ## Format 16kHz, 16bit, mono channel; ## Content category including education, interview, sports, etc. ## Language Hindi; ## Annotation annotation for the transcription text, speaker identification, gender; ## Application scenarios speech recognition, video caption generation and video content review; ## Accuracy at a Word Accuracy Rate (WAR) of no less than 98%. # Licensing Information Commercial License
Nexdata/Hindi_Spontaneous_Speech_Data
[ "task_categories:automatic-speech-recognition", "language:hi", "region:us" ]
2023-11-07T08:51:50+00:00
{"language": ["hi"], "task_categories": ["automatic-speech-recognition"]}
2023-11-10T07:36:04+00:00
[]
[ "hi" ]
TAGS #task_categories-automatic-speech-recognition #language-Hindi #region-us
# Dataset Card for Nexdata/Hindi_Spontaneous_Speech_Data ## Description 494 Hours - Hindi Spontaneous Speech Data, with content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attributes are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research. For more details, please refer to the link: URL # Specifications ## Format 16kHz, 16bit, mono channel; ## Content category including education, interview, sports, etc. ## Language Hindi; ## Annotation annotation for the transcription text, speaker identification, gender; ## Application scenarios speech recognition, video caption generation and video content review; ## Accuracy at a Word Accuracy Rate (WAR) of no less than 98%. # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/Hindi_Spontaneous_Speech_Data", "## Description\n494 Hours - Hindi Spontaneous Speech Data, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, mono channel;", "## Content category\nincluding education, interview, sports, etc.", "## Language\nHindi;", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Application scenarios\nspeech recognition, video caption generation and video content review;", "## Accuracy\nat a Word Accuracy Rate (WAR) of being no less than 98%.", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #language-Hindi #region-us \n", "# Dataset Card for Nexdata/Hindi_Spontaneous_Speech_Data", "## Description\n494 Hours - Hindi Spontaneous Speech Data, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, mono channel;", "## Content category\nincluding education, interview, sports, etc.", "## Language\nHindi;", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Application scenarios\nspeech recognition, video caption generation and video content review;", "## Accuracy\nat a Word Accuracy Rate (WAR) of being no less than 98%.", "# Licensing Information\nCommercial License" ]
[ 26, 21, 86, 3, 12, 12, 4, 17, 15, 21, 9 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #language-Hindi #region-us \n# Dataset Card for Nexdata/Hindi_Spontaneous_Speech_Data## Description\n494 Hours - Hindi Spontaneous Speech Data, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL# Specifications## Format\n16kHz, 16bit, mono channel;## Content category\nincluding education, interview, sports, etc.## Language\nHindi;## Annotation\nannotation for the transcription text, speaker identification, gender;## Application scenarios\nspeech recognition, video caption generation and video content review;## Accuracy\nat a Word Accuracy Rate (WAR) of being no less than 98%.# Licensing Information\nCommercial License" ]
f9849dd7a92d15a44a57dc78105885c5d321135c
# Dataset Card for Nexdata/Korean_Spontaneous_Speech_Data ## Description 396 Hours - Korean Spontaneous Speech Data, with content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attributes are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research. For more details, please refer to the link: https://www.nexdata.ai/datasets/1270?source=Huggingface # Specifications ## Format 16kHz, 16bit, mono channel; ## Content category including live, variety-show, speech, etc. ## Language Korean; ## Annotation annotation for the transcription text, speaker identification, gender; ## Application scenarios speech recognition, video caption generation and video content review; ## Accuracy at a Sentence Accuracy Rate (SAR) of no less than 95%. # Licensing Information Commercial License
Nexdata/Korean_Spontaneous_Speech_Data
[ "task_categories:automatic-speech-recognition", "task_categories:conversational", "language:ko", "region:us" ]
2023-11-07T08:53:43+00:00
{"language": ["ko"], "task_categories": ["automatic-speech-recognition", "conversational"]}
2023-11-10T07:36:29+00:00
[]
[ "ko" ]
TAGS #task_categories-automatic-speech-recognition #task_categories-conversational #language-Korean #region-us
# Dataset Card for Nexdata/Korean_Spontaneous_Speech_Data ## Description 396 Hours - Korean Spontaneous Speech Data, with content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attributes are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research. For more details, please refer to the link: URL # Specifications ## Format 16kHz, 16bit, mono channel; ## Content category including live, variety-show, speech, etc. ## Language Korean; ## Annotation annotation for the transcription text, speaker identification, gender; ## Application scenarios speech recognition, video caption generation and video content review; ## Accuracy at a Sentence Accuracy Rate (SAR) of no less than 95%. # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/Korean_Spontaneous_Speech_Data", "## Description\n396 Hours - Korean Spontaneous Speech Data, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, mono channel;", "## Content category\nincluding live, variety-show, speech, etc.", "## Language\nKorean;", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Application scenarios\nspeech recognition, video caption generation and video content review;", "## Accuracy\nat a Sentence Accuracy Rate (SAR) of being no less than 95%.", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #task_categories-conversational #language-Korean #region-us \n", "# Dataset Card for Nexdata/Korean_Spontaneous_Speech_Data", "## Description\n396 Hours - Korean Spontaneous Speech Data, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, mono channel;", "## Content category\nincluding live, variety-show, speech, etc.", "## Language\nKorean;", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Application scenarios\nspeech recognition, video caption generation and video content review;", "## Accuracy\nat a Sentence Accuracy Rate (SAR) of being no less than 95%.", "# Licensing Information\nCommercial License" ]
[ 37, 22, 86, 3, 12, 14, 4, 17, 15, 22, 9 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #task_categories-conversational #language-Korean #region-us \n# Dataset Card for Nexdata/Korean_Spontaneous_Speech_Data## Description\n396 Hours - Korean Spontaneous Speech Data, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL# Specifications## Format\n16kHz, 16bit, mono channel;## Content category\nincluding live, variety-show, speech, etc.## Language\nKorean;## Annotation\nannotation for the transcription text, speaker identification, gender;## Application scenarios\nspeech recognition, video caption generation and video content review;## Accuracy\nat a Sentence Accuracy Rate (SAR) of being no less than 95%.# Licensing Information\nCommercial License" ]
46cd67ee3562ba78d8dca1e6043e71dd1e7a0a71
# Dataset Card for Nexdata/Russian_Spontaneous_Speech_Data ## Description 503 Hours - Russian Spontaneous Speech Data, with content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attributes are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research. For more details, please refer to the link: https://www.nexdata.ai/datasets/1271?source=Huggingface # Specifications ## Format 16kHz, 16bit, mono channel; ## Content category including education, interview, sports, etc. ## Language Russian; ## Annotation annotation for the transcription text, speaker identification, gender; ## Application scenarios speech recognition, video caption generation and video content review; ## Accuracy at a Word Accuracy Rate (WAR) of no less than 98%. # Licensing Information Commercial License
Nexdata/Russian_Spontaneous_Speech_Data
[ "task_categories:automatic-speech-recognition", "language:ru", "region:us" ]
2023-11-07T08:55:21+00:00
{"language": ["ru"], "task_categories": ["automatic-speech-recognition"]}
2023-11-10T07:35:43+00:00
[]
[ "ru" ]
TAGS #task_categories-automatic-speech-recognition #language-Russian #region-us
# Dataset Card for Nexdata/Russian_Spontaneous_Speech_Data ## Description 503 Hours - Russian Spontaneous Speech Data, with content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attributes are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research. For more details, please refer to the link: URL # Specifications ## Format 16kHz, 16bit, mono channel; ## Content category including education, interview, sports, etc. ## Language Russian; ## Annotation annotation for the transcription text, speaker identification, gender; ## Application scenarios speech recognition, video caption generation and video content review; ## Accuracy at a Word Accuracy Rate (WAR) of no less than 98%. # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/Russian_Spontaneous_Speech_Data", "## Description\n503 Hours - Russian Spontaneous Speech Data, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, mono channel;", "## Content category\nincluding education, interview, sports, etc", "## Language\nRussian;", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Application scenarios\nspeech recognition, video caption generation and video content review;", "## Accuracy\nat a word Accuracy Rate (WAR) of being no less than 98%.", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #language-Russian #region-us \n", "# Dataset Card for Nexdata/Russian_Spontaneous_Speech_Data", "## Description\n503 Hours - Russian Spontaneous Speech Data, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, mono channel;", "## Content category\nincluding education, interview, sports, etc", "## Language\nRussian;", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Application scenarios\nspeech recognition, video caption generation and video content review;", "## Accuracy\nat a word Accuracy Rate (WAR) of being no less than 98%.", "# Licensing Information\nCommercial License" ]
[ 27, 22, 86, 3, 12, 11, 4, 17, 15, 21, 9 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #language-Russian #region-us \n# Dataset Card for Nexdata/Russian_Spontaneous_Speech_Data## Description\n503 Hours - Russian Spontaneous Speech Data, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL# Specifications## Format\n16kHz, 16bit, mono channel;## Content category\nincluding education, interview, sports, etc## Language\nRussian;## Annotation\nannotation for the transcription text, speaker identification, gender;## Application scenarios\nspeech recognition, video caption generation and video content review;## Accuracy\nat a word Accuracy Rate (WAR) of being no less than 98%.# Licensing Information\nCommercial License" ]
ef47e1dcffc237ff85a2a5f9e35529ba8acd90ff
# Dataset Card for Nexdata/Burmese_Spontaneous_Speech_Data ## Description The 212 Hours - Burmese Spontaneous Speech Data is a collection of speech clips, with content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attributes are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research. For more details, please refer to the link: https://www.nexdata.ai/datasets/1272?source=Huggingface # Specifications ## Format 16kHz, 16bit, mono channel; ## Content category including service, conversation, interview, etc. ## Language Burmese; ## Annotation annotation for the transcription text, speaker identification, gender; ## Application scenarios speech recognition, video caption generation and video content review; ## Accuracy at a Word Accuracy Rate (WAR) of no less than 98%. # Licensing Information Commercial License
Nexdata/Burmese_Spontaneous_Speech_Data
[ "task_categories:automatic-speech-recognition", "language:my", "region:us" ]
2023-11-07T08:56:47+00:00
{"language": ["my"], "task_categories": ["automatic-speech-recognition"]}
2023-11-10T07:35:55+00:00
[]
[ "my" ]
TAGS #task_categories-automatic-speech-recognition #language-Burmese #region-us
# Dataset Card for Nexdata/Burmese_Spontaneous_Speech_Data ## Description The 212 Hours - Burmese Spontaneous Speech Data is a collection of speech clips, with content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attributes are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research. For more details, please refer to the link: URL # Specifications ## Format 16kHz, 16bit, mono channel; ## Content category including service, conversation, interview, etc. ## Language Burmese; ## Annotation annotation for the transcription text, speaker identification, gender; ## Application scenarios speech recognition, video caption generation and video content review; ## Accuracy at a Word Accuracy Rate (WAR) of no less than 98%. # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/Burmese_Spontaneous_Speech_Data", "## Description\nThe 212 Hours - Burmese Spontaneous Speech Data is a collection of speech clips, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, mono channel;", "## Content category\nincluding service, conversation, interview, etc.", "## Language\nBurmese;", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Application scenarios\nspeech recognition, video caption generation and video content review;", "## Accuracy\nat a word Accuracy Rate (WAR) of being no less than 98%.", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #language-Burmese #region-us \n", "# Dataset Card for Nexdata/Burmese_Spontaneous_Speech_Data", "## Description\nThe 212 Hours - Burmese Spontaneous Speech Data is a collection of speech clips, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, mono channel;", "## Content category\nincluding service, conversation, interview, etc.", "## Language\nBurmese;", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Application scenarios\nspeech recognition, video caption generation and video content review;", "## Accuracy\nat a word Accuracy Rate (WAR) of being no less than 98%.", "# Licensing Information\nCommercial License" ]
[ 26, 21, 94, 3, 12, 12, 5, 17, 15, 21, 9 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #language-Burmese #region-us \n# Dataset Card for Nexdata/Burmese_Spontaneous_Speech_Data## Description\nThe 212 Hours - Burmese Spontaneous Speech Data is a collection of speech clips, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL# Specifications## Format\n16kHz, 16bit, mono channel;## Content category\nincluding service, conversation, interview, etc.## Language\nBurmese;## Annotation\nannotation for the transcription text, speaker identification, gender;## Application scenarios\nspeech recognition, video caption generation and video content review;## Accuracy\nat a word Accuracy Rate (WAR) of being no less than 98%.# Licensing Information\nCommercial License" ]
cedfa0889d1d145e4825f02fa8ec38fbbb39cb40
# Dataset Card for Nexdata/French_Children_Spontaneous_Speech_Data ## Description The 162 Hours - French Child's Spontaneous Speech Data is a collection of speech clips, with content covering multiple topics. All the speech audio was manually transcribed into text; speaker identity, gender, and other attributes are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research. For more details, please refer to the link: https://www.nexdata.ai/datasets/1307?source=Huggingface # Specifications ## Format mp4 for video and wav for audio; ## Age children aged 12 and under; ## Content category including interview, self-media, variety show, etc. ## Language French; ## Annotation annotation for the transcription text, speaker identification, gender; ## Application scenarios speech recognition, video caption generation and video content review; ## Accuracy at a Word Accuracy Rate (WAR) of no less than 98%. # Licensing Information Commercial License
Nexdata/French_Children_Spontaneous_Speech_Data
[ "task_categories:automatic-speech-recognition", "language:fr", "region:us" ]
2023-11-07T08:58:35+00:00
{"language": ["fr"], "task_categories": ["automatic-speech-recognition"]}
2023-11-10T07:35:17+00:00
[]
[ "fr" ]
TAGS #task_categories-automatic-speech-recognition #language-French #region-us
# Dataset Card for Nexdata/French_Children_Spontaneous_Speech_Data ## Description The 162 Hours - French Child's Spontaneous Speech Data is a collection of speech clips, with content covering multiple topics. All the speech audio was manually transcribed into text; speaker identity, gender, and other attributes are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research. For more details, please refer to the link: URL # Specifications ## Format mp4 for video and wav for audio; ## Age children aged 12 and under; ## Content category including interview, self-media, variety show, etc. ## Language French; ## Annotation annotation for the transcription text, speaker identification, gender; ## Application scenarios speech recognition, video caption generation and video content review; ## Accuracy at a Word Accuracy Rate (WAR) of no less than 98%. # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/French_Children_Spontaneous_Speech_Data", "## Description\nThe 162 Hours - French Child's Spontaneous Speech Data is a collection of speech clips, the content covering multiple topics. All the speech audio was manually transcribed into text; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\nmp4 for video and wav for audio;", "## age\nchildren aged 12 and under;", "## Content category\nincluding interview, self-meida,variety show, etc.", "## Language\nFrench;", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Application scenarios\nspeech recognition, video caption generation and video content review;", "## Accuracy\nat a Word Accuracy Rate (SAR) of being no less than 98%.", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #language-French #region-us \n", "# Dataset Card for Nexdata/French_Children_Spontaneous_Speech_Data", "## Description\nThe 162 Hours - French Child's Spontaneous Speech Data is a collection of speech clips, the content covering multiple topics. All the speech audio was manually transcribed into text; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\nmp4 for video and wav for audio;", "## age\nchildren aged 12 and under;", "## Content category\nincluding interview, self-meida,variety show, etc.", "## Language\nFrench;", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Application scenarios\nspeech recognition, video caption generation and video content review;", "## Accuracy\nat a Word Accuracy Rate (SAR) of being no less than 98%.", "# Licensing Information\nCommercial License" ]
[ 28, 27, 95, 3, 12, 9, 17, 4, 17, 15, 21, 9 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #language-French #region-us \n# Dataset Card for Nexdata/French_Children_Spontaneous_Speech_Data## Description\nThe 162 Hours - French Child's Spontaneous Speech Data is a collection of speech clips, the content covering multiple topics. All the speech audio was manually transcribed into text; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL# Specifications## Format\nmp4 for video and wav for audio;## age\nchildren aged 12 and under;## Content category\nincluding interview, self-meida,variety show, etc.## Language\nFrench;## Annotation\nannotation for the transcription text, speaker identification, gender;## Application scenarios\nspeech recognition, video caption generation and video content review;## Accuracy\nat a Word Accuracy Rate (SAR) of being no less than 98%.# Licensing Information\nCommercial License" ]
95c73af26e15a4fe8b2130380bf09ecf24ffc0da
# Dataset Card for "xlsum_data-xlsumm_results" rouge={'rouge1': 0.3818263240441036, 'rouge2': 0.1806833901933182, 'rougeL': 0.29281990848626877, 'rougeLsum': 0.29281990848626877} Bert={'precision': 0.7890678104872487, 'recall': 0.7642590621622597, 'f1': 0.7759933983573515} mover 0.6348088428016108
arthurmluz/xlsum_data-xlsum_results
[ "region:us" ]
2023-11-07T08:58:45+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "gen_summary", "dtype": "string"}, {"name": "rouge", "struct": [{"name": "rouge1", "dtype": "float64"}, {"name": "rouge2", "dtype": "float64"}, {"name": "rougeL", "dtype": "float64"}, {"name": "rougeLsum", "dtype": "float64"}]}, {"name": "bert", "struct": [{"name": "f1", "sequence": "float64"}, {"name": "hashcode", "dtype": "string"}, {"name": "precision", "sequence": "float64"}, {"name": "recall", "sequence": "float64"}]}, {"name": "moverScore", "dtype": "float64"}], "splits": [{"name": "validation", "num_bytes": 25879688, "num_examples": 7175}], "download_size": 15619175, "dataset_size": 25879688}, "configs": [{"config_name": "default", "data_files": [{"split": "validation", "path": "data/validation-*"}]}]}
2023-11-13T20:39:18+00:00
[]
[]
TAGS #region-us
# Dataset Card for "xlsum_data-xlsumm_results" rouge={'rouge1': 0.3818263240441036, 'rouge2': 0.1806833901933182, 'rougeL': 0.29281990848626877, 'rougeLsum': 0.29281990848626877} Bert={'precision': 0.7890678104872487, 'recall': 0.7642590621622597, 'f1': 0.7759933983573515} mover 0.6348088428016108
[ "# Dataset Card for \"xlsum_data-xlsumm_results\"\n\nrouge={'rouge1': 0.3818263240441036, 'rouge2': 0.1806833901933182, 'rougeL': 0.29281990848626877, 'rougeLsum': 0.29281990848626877}\n\nBert={'precision': 0.7890678104872487, 'recall': 0.7642590621622597, 'f1': 0.7759933983573515}\n\nmover 0.6348088428016108" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"xlsum_data-xlsumm_results\"\n\nrouge={'rouge1': 0.3818263240441036, 'rouge2': 0.1806833901933182, 'rougeL': 0.29281990848626877, 'rougeLsum': 0.29281990848626877}\n\nBert={'precision': 0.7890678104872487, 'recall': 0.7642590621622597, 'f1': 0.7759933983573515}\n\nmover 0.6348088428016108" ]
[ 6, 136 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"xlsum_data-xlsumm_results\"\n\nrouge={'rouge1': 0.3818263240441036, 'rouge2': 0.1806833901933182, 'rougeL': 0.29281990848626877, 'rougeLsum': 0.29281990848626877}\n\nBert={'precision': 0.7890678104872487, 'recall': 0.7642590621622597, 'f1': 0.7759933983573515}\n\nmover 0.6348088428016108" ]
8ee447a123a4e1b830d573b896de6010539d7f91
# Dataset Card for Nexdata/German_Children_Spontaneous_Speech_Data ## Description The 97 Hours - German Child's Spontaneous Speech Data, manually screened and processed. Annotation contains transcription text, speaker identification, gender and other information. This dataset can be applied in speech recognition (acoustic model or language model training), caption generation, voice content moderation and other AI algorithm research. For more details, please refer to the link: https://www.nexdata.ai/datasets/1299?source=Huggingface # Specifications ## Format 16k Hz, 16 bit, wav, mono channel; ## Age 12 years old and younger children; ## Content category including self-media, conversation, live, lecture, variety show; ## Language German; ## Annotation annotation for the transcription text, speaker identification, gender; ## Accuracy Word Accuracy Rate (WAR) at least 98%. # Licensing Information Commercial License
Nexdata/German_Children_Spontaneous_Speech_Data
[ "task_categories:automatic-speech-recognition", "language:de", "region:us" ]
2023-11-07T09:00:18+00:00
{"language": ["de"], "task_categories": ["automatic-speech-recognition"]}
2024-01-26T08:54:52+00:00
[]
[ "de" ]
TAGS #task_categories-automatic-speech-recognition #language-German #region-us
# Dataset Card for Nexdata/German_Children_Spontaneous_Speech_Data ## Description The 97 Hours - German Child's Spontaneous Speech Data, manually screened and processed. Annotation contains transcription text, speaker identification, gender and other information. This dataset can be applied in speech recognition (acoustic model or language model training), caption generation, voice content moderation and other AI algorithm research. For more details, please refer to the link: URL # Specifications ## Format 16k Hz, 16 bit, wav, mono channel; ## Age 12 years old and younger children; ## Content category including self-media, conversation, live, lecture, variety show; ## Language German; ## Annotation annotation for the transcription text, speaker identification, gender; ## Accuracy Word Accuracy Rate (WAR) at least 98%. # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/German_Children_Spontaneous_Speech_Data", "## Description\nThe 97 Hours - German Child's Spontaneous Speech Data, manually screened and processed. Annotation contains transcription text, speaker identification, gender and other informantion. This dataset can be applied in speech recognition (acoustic model or language model training), caption generation, voice content moderation and other AI algorithm research.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16k Hz, 16 bit, wav, mono channel;", "## Age\n12 years old and younger children;", "## Content category\nincluding self-media, conversation, live, lecture, variety show;", "## Language\nGerman;", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Accuracy\nWord Accuracy Rate (WAR) at least 98%.", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #language-German #region-us \n", "# Dataset Card for Nexdata/German_Children_Spontaneous_Speech_Data", "## Description\nThe 97 Hours - German Child's Spontaneous Speech Data, manually screened and processed. Annotation contains transcription text, speaker identification, gender and other informantion. This dataset can be applied in speech recognition (acoustic model or language model training), caption generation, voice content moderation and other AI algorithm research.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16k Hz, 16 bit, wav, mono channel;", "## Age\n12 years old and younger children;", "## Content category\nincluding self-media, conversation, live, lecture, variety show;", "## Language\nGerman;", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Accuracy\nWord Accuracy Rate (WAR) at least 98%.", "# Licensing Information\nCommercial License" ]
[ 26, 25, 87, 3, 15, 10, 17, 4, 17, 16, 9 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #language-German #region-us \n# Dataset Card for Nexdata/German_Children_Spontaneous_Speech_Data## Description\nThe 97 Hours - German Child's Spontaneous Speech Data, manually screened and processed. Annotation contains transcription text, speaker identification, gender and other informantion. This dataset can be applied in speech recognition (acoustic model or language model training), caption generation, voice content moderation and other AI algorithm research.\n\nFor more details, please refer to the link: URL# Specifications## Format\n16k Hz, 16 bit, wav, mono channel;## Age\n12 years old and younger children;## Content category\nincluding self-media, conversation, live, lecture, variety show;## Language\nGerman;## Annotation\nannotation for the transcription text, speaker identification, gender;## Accuracy\nWord Accuracy Rate (WAR) at least 98%.# Licensing Information\nCommercial License" ]
030fe0b646fa428137c90f0f3960717fbb77df51
# Dataset Card for Nexdata/Malay_Conversational_Speech_Data_by_Mobile_Phone ## Description The 127 Hours - Malay Conversational Speech Data by Mobile Phone collected by phone involved 142 native speakers, developed with a proper balance of gender ratio. Speakers would choose a few familiar topics out of the given list and start conversations to ensure the dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification. For more details, please refer to the link: https://www.nexdata.ai/datasets/1280?source=Huggingface # Specifications ## Format 16kHz 16bit, uncompressed wav, mono channel; ## Environment quiet indoor environment, without echo; ## Recording content dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed; ## Demographics 142 speakers in total, with 46% males and 54% females. ## Annotation annotating for the transcription text, speaker identification, gender and noise symbols; ## Device Android mobile phone, iPhone; ## Language Malay; ## Application scenarios speech recognition; voiceprint recognition; ## Accuracy rate the word accuracy rate is not less than 98% # Licensing Information Commercial License
Nexdata/Malay_Conversational_Speech_Data_by_Mobile_Phone
[ "task_categories:automatic-speech-recognition", "language:ms", "region:us" ]
2023-11-07T09:01:46+00:00
{"language": ["ms"], "task_categories": ["automatic-speech-recognition"]}
2023-11-10T07:35:06+00:00
[]
[ "ms" ]
TAGS #task_categories-automatic-speech-recognition #language-Malay (macrolanguage) #region-us
# Dataset Card for Nexdata/Malay_Conversational_Speech_Data_by_Mobile_Phone ## Description The 127 Hours - Malay Conversational Speech Data by Mobile Phone collected by phone involved 142 native speakers, developed with a proper balance of gender ratio. Speakers would choose a few familiar topics out of the given list and start conversations to ensure the dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification. For more details, please refer to the link: URL # Specifications ## Format 16kHz 16bit, uncompressed wav, mono channel; ## Environment quiet indoor environment, without echo; ## Recording content dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed; ## Demographics 142 speakers in total, with 46% males and 54% females. ## Annotation annotating for the transcription text, speaker identification, gender and noise symbols; ## Device Android mobile phone, iPhone; ## Language Malay; ## Application scenarios speech recognition; voiceprint recognition; ## Accuracy rate the word accuracy rate is not less than 98% # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/Malay_Conversational_Speech_Data_by_Mobile_Phone", "## Description\nThe 127 Hours - Malay Conversational Speech Data by Mobile Phone collected by phone involved 142 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz 16bit, uncompressed wav, mono channel;", "## Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n142 speakers totally, with 46% males and 54% females.", "## Annotation\nannotating for the transcription text, speaker identification, gender and noise symbols;", "## Device\nAndroid mobile phone, iPhone;", "## Language\nMalay;", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #language-Malay (macrolanguage) #region-us \n", "# Dataset Card for Nexdata/Malay_Conversational_Speech_Data_by_Mobile_Phone", "## Description\nThe 127 Hours - Malay Conversational Speech Data by Mobile Phone collected by phone involved 142 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz 16bit, uncompressed wav, mono channel;", "## Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n142 speakers totally, with 46% males and 54% females.", "## Annotation\nannotating for the transcription text, speaker identification, gender and noise symbols;", "## Device\nAndroid mobile phone, iPhone;", "## Language\nMalay;", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ 32, 27, 148, 3, 18, 14, 30, 20, 23, 8, 5, 11, 16, 9 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #language-Malay (macrolanguage) #region-us \n# Dataset Card for Nexdata/Malay_Conversational_Speech_Data_by_Mobile_Phone## Description\nThe 127 Hours - Malay Conversational Speech Data by Mobile Phone collected by phone involved 142 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL# Specifications## Format\n16kHz 16bit, uncompressed wav, mono channel;## Environment\nquiet indoor environment, without echo;## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;## Demographics\n142 speakers totally, with 46% males and 54% females.## Annotation\nannotating for the transcription text, speaker identification, gender and noise symbols;## Device\nAndroid mobile phone, iPhone;## Language\nMalay;## Application scenarios\nspeech recognition; voiceprint recognition;## Accuracy rate\nthe word accuracy rate is not less than 98%# Licensing Information\nCommercial License" ]
dabfa365193a1b2362ffcc61e21287a5a91ef8bb
# Dataset Card for Nexdata/Multi-angle_Lip_Multimodal_Video_Data ## Description 202 People - Multi-angle Lip Multimodal Video Data. The collection environments include indoor natural light scenes and indoor fluorescent lamp scenes. The device is cellphone. The diversity includes multiple scenes, different ages, 13 shooting angles. The language is Mandarin Chinese. The recording content is general field, unlimited content. The data can be used in multi-modal learning algorithms research in speech and image fields. For more details, please refer to the link: https://www.nexdata.ai/datasets/1298?source=Huggingface # Specifications ## Data size 202 people, each person collects the audio and video data from 13 different angles +1 txt document ## People distribution race distribution: Asian (Indonesia), gender distribution: 89 males, 113 females, age distribution: 165 people aged 18-30, 32 people aged 31-45, and 5 people aged 46-60 ## Collecting environment indoor natural light scenes, indoor fluorescent lamp scenes ## Data diversity including multiple scenes, different ages, different shooting angles ## Device cellphone, the resolution is 1,920*1,080 ## Collecting angle audio and video data of front face, 3 angles left side face, 3 angles right side face, looking down, looking up, left side face down, right side face down, left side face up and right side face up; all 13 different angles were collected at the same time ## Recording content general field, unlimited content ## Language Mandarin Chinese, each video is more than 20 seconds ## Data format the video data format is .mp4, the audio is greater than or equal to 16KHz, 16bit, the frame rate is 25-30 fps ## Accuracy rate the sentence accuracy rate is more than 95% # Licensing Information Commercial License
Nexdata/Multi-angle_Lip_Multimodal_Video_Data
[ "language:zh", "region:us" ]
2023-11-07T09:03:08+00:00
{"language": ["zh"]}
2024-01-26T08:55:05+00:00
[]
[ "zh" ]
TAGS #language-Chinese #region-us
# Dataset Card for Nexdata/Multi-angle_Lip_Multimodal_Video_Data ## Description 202 People - Multi-angle Lip Multimodal Video Data. The collection environments include indoor natural light scenes and indoor fluorescent lamp scenes. The device is cellphone. The diversity includes multiple scenes, different ages, 13 shooting angles. The language is Mandarin Chinese. The recording content is general field, unlimited content. The data can be used in multi-modal learning algorithms research in speech and image fields. For more details, please refer to the link: URL # Specifications ## Data size 202 people, each person collects the audio and video data from 13 different angles +1 txt document ## People distribution race distribution: Asian (Indonesia), gender distribution: 89 males, 113 females, age distribution: 165 people aged 18-30, 32 people aged 31-45, and 5 people aged 46-60 ## Collecting environment indoor natural light scenes, indoor fluorescent lamp scenes ## Data diversity including multiple scenes, different ages, different shooting angles ## Device cellphone, the resolution is 1,920*1,080 ## Collecting angle audio and video data of front face, 3 angles left side face, 3 angles right side face, looking down, looking up, left side face down, right side face down, left side face up and right side face up; all 13 different angles were collected at the same time ## Recording content general field, unlimited content ## Language Mandarin Chinese, each video is more than 20 seconds ## Data format the video data format is .mp4, the audio is greater than or equal to 16KHz, 16bit, the frame rate is 25-30 fps ## Accuracy rate the sentence accuracy rate is more than 95% # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/Multi-angle_Lip_Multimodal_Video_Data", "## Description\n202 People - Multi-angle Lip Multimodal Video Data. The collection environments include indoor natural light scenes and indoor fluorescent lamp scenes. The device is cellphone. The diversity includes multiple scenes, different ages, 13 shooting angles. The language is Mandarin Chinese. The recording content is general field, unlimited content. The data can be used in multi-modal learning algorithms research in speech and image fields.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Data size\n202 people, each person collects the audio and video data from 13 different angles +1 txt document", "## People distribution\nrace distribution: Asian (Indonesia), gender distribution: 89 males, 113 females, age distribution: 165 people aged 18-30, 32 people aged 31-45, and 5 people aged 46-60", "## Collecting environment\nindoor natural light scenes, indoor fluorescent lamp scenes", "## Data diversity\nincluding multiple scenes, different ages, different shooting angles", "## Device\ncellphone, the resolution is 1,920*1,080", "## Collecting angle\naudio and video data of front face, 3 angles left side face, 3 angles right side face, looking down, looking up, left side face down, right side face down, left side face up and right side face up all 13 different angles were collected at the same time", "## Recording content\ngeneral field, unlimited content", "## Language\nMandarin Chinese, each video is more than 20 seconds", "## Data format\nthe video data format is .mp4, the audio is greater than or equal to 16KHz, 16bit, the frame rate is 25-30 fps", "## Accuracy rata\nthe accuracy rate of sentence is more than 95%", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#language-Chinese #region-us \n", "# Dataset Card for Nexdata/Multi-angle_Lip_Multimodal_Video_Data", "## Description\n202 People - Multi-angle Lip Multimodal Video Data. The collection environments include indoor natural light scenes and indoor fluorescent lamp scenes. The device is cellphone. The diversity includes multiple scenes, different ages, 13 shooting angles. The language is Mandarin Chinese. The recording content is general field, unlimited content. The data can be used in multi-modal learning algorithms research in speech and image fields.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Data size\n202 people, each person collects the audio and video data from 13 different angles +1 txt document", "## People distribution\nrace distribution: Asian (Indonesia), gender distribution: 89 males, 113 females, age distribution: 165 people aged 18-30, 32 people aged 31-45, and 5 people aged 46-60", "## Collecting environment\nindoor natural light scenes, indoor fluorescent lamp scenes", "## Data diversity\nincluding multiple scenes, different ages, different shooting angles", "## Device\ncellphone, the resolution is 1,920*1,080", "## Collecting angle\naudio and video data of front face, 3 angles left side face, 3 angles right side face, looking down, looking up, left side face down, right side face down, left side face up and right side face up all 13 different angles were collected at the same time", "## Recording content\ngeneral field, unlimited content", "## Language\nMandarin Chinese, each video is more than 20 seconds", "## Data format\nthe video data format is .mp4, the audio is greater than or equal to 16KHz, 16bit, the frame rate is 25-30 fps", "## Accuracy rata\nthe accuracy rate of sentence is more than 95%", "# Licensing Information\nCommercial License" ]
[ 11, 24, 107, 3, 24, 45, 18, 17, 13, 63, 9, 13, 35, 16, 9 ]
[ "passage: TAGS\n#language-Chinese #region-us \n# Dataset Card for Nexdata/Multi-angle_Lip_Multimodal_Video_Data## Description\n202 People - Multi-angle Lip Multimodal Video Data. The collection environments include indoor natural light scenes and indoor fluorescent lamp scenes. The device is cellphone. The diversity includes multiple scenes, different ages, 13 shooting angles. The language is Mandarin Chinese. The recording content is general field, unlimited content. The data can be used in multi-modal learning algorithms research in speech and image fields.\n\nFor more details, please refer to the link: URL# Specifications## Data size\n202 people, each person collects the audio and video data from 13 different angles +1 txt document## People distribution\nrace distribution: Asian (Indonesia), gender distribution: 89 males, 113 females, age distribution: 165 people aged 18-30, 32 people aged 31-45, and 5 people aged 46-60## Collecting environment\nindoor natural light scenes, indoor fluorescent lamp scenes## Data diversity\nincluding multiple scenes, different ages, different shooting angles## Device\ncellphone, the resolution is 1,920*1,080## Collecting angle\naudio and video data of front face, 3 angles left side face, 3 angles right side face, looking down, looking up, left side face down, right side face down, left side face up and right side face up all 13 different angles were collected at the same time## Recording content\ngeneral field, unlimited content## Language\nMandarin Chinese, each video is more than 20 seconds## Data format\nthe video data format is .mp4, the audio is greater than or equal to 16KHz, 16bit, the frame rate is 25-30 fps## Accuracy rata\nthe accuracy rate of sentence is more than 95%# Licensing Information\nCommercial License" ]
f6cc066ae06c6bd44d1a3f722910531fe1465c81
# Dataset Card for Nexdata/Italian_Children_Spontaneous_Speech_Speech_Data ## Description The 101 Hours - Italian Child's Spontaneous Speech Data, manually screened and processed. Annotation contains transcription text, speaker identification, gender and other information. This dataset can be applied in speech recognition (acoustic model or language model training), caption generation, voice content moderation and other AI algorithm research. For more details, please refer to the link: https://www.nexdata.ai/datasets/1300?source=Huggingface # Specifications ## Format 16 kHz, 16 bit, wav, mono channel; ## Age 12 years old and younger children; ## Content category including self-media, conversation, live, lecture, variety show; ## Language Italian; ## Annotation annotation for the transcription text, speaker identification, gender; ## Accuracy Word Accuracy Rate (WAR) at least 98%. # Licensing Information Commercial License
Nexdata/Italian_Children_Spontaneous_Speech_Speech_Data
[ "task_categories:automatic-speech-recognition", "language:it", "region:us" ]
2023-11-07T09:04:42+00:00
{"language": ["it"], "task_categories": ["automatic-speech-recognition"]}
2023-11-10T07:31:38+00:00
[]
[ "it" ]
TAGS #task_categories-automatic-speech-recognition #language-Italian #region-us
# Dataset Card for Nexdata/Italian_Children_Spontaneous_Speech_Speech_Data ## Description The 101 Hours - Italian Child's Spontaneous Speech Data, manually screened and processed. Annotation contains transcription text, speaker identification, gender and other information. This dataset can be applied in speech recognition (acoustic model or language model training), caption generation, voice content moderation and other AI algorithm research. For more details, please refer to the link: URL # Specifications ## Format 16 kHz, 16 bit, wav, mono channel; ## Age 12 years old and younger children; ## Content category including self-media, conversation, live, lecture, variety show; ## Language Italian; ## Annotation annotation for the transcription text, speaker identification, gender; ## Accuracy Word Accuracy Rate (WAR) at least 98%. # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/Italian_Children_Spontaneous_Speech_Speech_Data", "## Description\nThe 101 Hours - Italian Child's Spontaneous Speech Data, manually screened and processed. Annotation contains transcription text, speaker identification, gender and other informantion. This dataset can be applied in speech recognition (acoustic model or language model training), caption generation, voice content moderation and other AI algorithm research.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16k Hz, 16 bit, wav, mono channel;", "## Age\n12 years old and younger children;", "## Content category\nincluding self-media, conversation, live, lecture, variety show;", "## Language\nItalian", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Accuracy\nWord Accuracy Rate (WAR) at least 98%.", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #language-Italian #region-us \n", "# Dataset Card for Nexdata/Italian_Children_Spontaneous_Speech_Speech_Data", "## Description\nThe 101 Hours - Italian Child's Spontaneous Speech Data, manually screened and processed. Annotation contains transcription text, speaker identification, gender and other informantion. This dataset can be applied in speech recognition (acoustic model or language model training), caption generation, voice content moderation and other AI algorithm research.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16k Hz, 16 bit, wav, mono channel;", "## Age\n12 years old and younger children;", "## Content category\nincluding self-media, conversation, live, lecture, variety show;", "## Language\nItalian", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Accuracy\nWord Accuracy Rate (WAR) at least 98%.", "# Licensing Information\nCommercial License" ]
[ 27, 30, 87, 3, 15, 10, 17, 3, 17, 16, 9 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #language-Italian #region-us \n# Dataset Card for Nexdata/Italian_Children_Spontaneous_Speech_Speech_Data## Description\nThe 101 Hours - Italian Child's Spontaneous Speech Data, manually screened and processed. Annotation contains transcription text, speaker identification, gender and other informantion. This dataset can be applied in speech recognition (acoustic model or language model training), caption generation, voice content moderation and other AI algorithm research.\n\nFor more details, please refer to the link: URL# Specifications## Format\n16k Hz, 16 bit, wav, mono channel;## Age\n12 years old and younger children;## Content category\nincluding self-media, conversation, live, lecture, variety show;## Language\nItalian## Annotation\nannotation for the transcription text, speaker identification, gender;## Accuracy\nWord Accuracy Rate (WAR) at least 98%.# Licensing Information\nCommercial License" ]
b96577c256e1c388406c6b379e55ac71b0b90724
# Dataset Card for Nexdata/Canadian_French_Conversational_Speech_Data_by_Mobile_Phone ## Description 80 Hours - Canadian French Conversational Speech Data by Mobile Phone involved 126 native speakers, developed with a proper balance of gender ratio. Speakers would choose a few familiar topics out of the given list and start conversations to ensure the dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification. For more details, please refer to the link: https://www.nexdata.ai/datasets/1302?source=Huggingface # Specifications ## Format 16kHz 16bit, uncompressed wav, mono channel; ## Environment quiet indoor environment, without echo; ## Recording content dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed; ## Demographics 126 speakers in total, with 48% males and 52% females; ## Annotation annotating for the transcription text, speaker identification, gender and noise symbols; ## Device Android mobile phone, iPhone; ## Language French; ## Application scenarios speech recognition; voiceprint recognition; ## Accuracy rate the word accuracy rate is not less than 98% # Licensing Information Commercial License
Nexdata/Canadian_French_Conversational_Speech_Data_by_Mobile_Phone
[ "task_categories:automatic-speech-recognition", "language:fr", "region:us" ]
2023-11-07T09:06:09+00:00
{"language": ["fr"], "task_categories": ["automatic-speech-recognition"]}
2024-01-26T08:54:49+00:00
[]
[ "fr" ]
TAGS #task_categories-automatic-speech-recognition #language-French #region-us
# Dataset Card for Nexdata/Canadian_French_Conversational_Speech_Data_by_Mobile_Phone ## Description 80 Hours - Canadian French Conversational Speech Data by Mobile Phone involved 126 native speakers, developed with a proper balance of gender ratio. Speakers would choose a few familiar topics out of the given list and start conversations to ensure the dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification. For more details, please refer to the link: URL # Specifications ## Format 16kHz 16bit, uncompressed wav, mono channel; ## Environment quiet indoor environment, without echo; ## Recording content dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed; ## Demographics 126 speakers in total, with 48% males and 52% females; ## Annotation annotating for the transcription text, speaker identification, gender and noise symbols; ## Device Android mobile phone, iPhone; ## Language French; ## Application scenarios speech recognition; voiceprint recognition; ## Accuracy rate the word accuracy rate is not less than 98% # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/Canadian_French_Conversational_Speech_Data_by_Mobile_Phone", "## Description\n80 Hours - Canadian French Conversational Speech Data by Mobile Phone involved 126 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz 16bit, uncompressed wav, mono channel;", "## Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n126 speakers totally, with 48% males and 52% females;", "## Annotation\nannotating for the transcription text, speaker identification, gender and noise symbols;", "## Device\nAndroid mobile phone, iPhone;", "## Language\nFrench;", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #language-French #region-us \n", "# Dataset Card for Nexdata/Canadian_French_Conversational_Speech_Data_by_Mobile_Phone", "## Description\n80 Hours - Canadian French Conversational Speech Data by Mobile Phone involved 126 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz 16bit, uncompressed wav, mono channel;", "## Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n126 speakers totally, with 48% males and 52% females;", "## Annotation\nannotating for the transcription text, speaker identification, gender and noise symbols;", "## Device\nAndroid mobile phone, iPhone;", "## Language\nFrench;", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ 28, 32, 143, 3, 18, 14, 30, 20, 23, 8, 4, 11, 16, 9 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #language-French #region-us \n# Dataset Card for Nexdata/Canadian_French_Conversational_Speech_Data_by_Mobile_Phone## Description\n80 Hours - Canadian French Conversational Speech Data by Mobile Phone involved 126 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL# Specifications## Format\n16kHz 16bit, uncompressed wav, mono channel;## Environment\nquiet indoor environment, without echo;## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;## Demographics\n126 speakers totally, with 48% males and 52% females;## Annotation\nannotating for the transcription text, speaker identification, gender and noise symbols;## Device\nAndroid mobile phone, iPhone;## Language\nFrench;## Application scenarios\nspeech recognition; voiceprint recognition;## Accuracy rate\nthe word accuracy rate is not less than 98%# Licensing Information\nCommercial License" ]
5201318b96c2a59d638a7d21df95adadf8b2e0db
# Dataset Card for "seven_cups" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
NUS-IDS/seven_cups
[ "region:us" ]
2023-11-07T09:07:33+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "anxiety", "path": "data/anxiety-*"}, {"split": "bipolar", "path": "data/bipolar-*"}, {"split": "depression", "path": "data/depression-*"}, {"split": "personalitydisorders", "path": "data/personalitydisorders-*"}, {"split": "trauma", "path": "data/trauma-*"}]}], "dataset_info": {"features": [{"name": "lead_post", "struct": [{"name": "author", "dtype": "string"}, {"name": "content", "dtype": "string"}, {"name": "date", "dtype": "string"}, {"name": "thread_id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "topic", "dtype": "string"}, {"name": "url", "dtype": "string"}]}, {"name": "comment_posts", "list": [{"name": "author", "dtype": "string"}, {"name": "content", "dtype": "string"}, {"name": "parent_ids", "sequence": "string"}, {"name": "post_id", "dtype": "string"}, {"name": "thread_id", "dtype": "string"}, {"name": "url", "dtype": "string"}]}], "splits": [{"name": "anxiety", "num_bytes": 24332055, "num_examples": 7948}, {"name": "bipolar", "num_bytes": 3496018, "num_examples": 1033}, {"name": "depression", "num_bytes": 59927557, "num_examples": 10243}, {"name": "personalitydisorders", "num_bytes": 9791687, "num_examples": 1854}, {"name": "trauma", "num_bytes": 53211657, "num_examples": 5763}], "download_size": 62533846, "dataset_size": 150758974}}
2023-11-07T09:08:02+00:00
[]
[]
TAGS #region-us
# Dataset Card for "seven_cups" More Information needed
[ "# Dataset Card for \"seven_cups\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"seven_cups\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"seven_cups\"\n\nMore Information needed" ]
71b91c3121bbb6ecbfe039d43202d30531a5b6f5
# Dataset Card for Nexdata/Mexican_Spanish_Conversational_Speech_Data_by_Mobile_Phone ## Description 107 Hours - Mexican Spanish Conversational Speech Data by Mobile Phone involved 126 native speakers, developed with a proper balance of gender ratio. Speakers would choose a few familiar topics out of the given list and start conversations to ensure the dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification. For more details, please refer to the link: https://www.nexdata.ai/datasets/1303?source=Huggingface # Specifications ## Format 16kHz 16bit, uncompressed wav, mono channel; ## Environment quiet indoor environment, without echo; ## Recording content dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed; ## Demographics 126 speakers in total, with 48% male and 52% female; ## Annotation annotating for the transcription text, speaker identification and gender ## Device Android mobile phone, iPhone; ## Language Spanish; ## Application scenarios speech recognition; voiceprint recognition; ## Accuracy rate the word accuracy rate is not less than 98% # Licensing Information Commercial License
Nexdata/Mexican_Spanish_Conversational_Speech_Data_by_Mobile_Phone
[ "task_categories:automatic-speech-recognition", "language:es", "region:us" ]
2023-11-07T09:07:46+00:00
{"language": ["es"], "task_categories": ["automatic-speech-recognition"]}
2023-11-10T07:31:27+00:00
[]
[ "es" ]
TAGS #task_categories-automatic-speech-recognition #language-Spanish #region-us
# Dataset Card for Nexdata/Mexican_Spanish_Conversational_Speech_Data_by_Mobile_Phone ## Description 107 Hours - Mexican Spanish Conversational Speech Data by Mobile Phone involved 126 native speakers, developed with a proper balance of gender ratio. Speakers would choose a few familiar topics out of the given list and start conversations to ensure the dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification. For more details, please refer to the link: URL # Specifications ## Format 16kHz 16bit, uncompressed wav, mono channel; ## Environment quiet indoor environment, without echo; ## Recording content dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed; ## Demographics 126 speakers in total, with 48% male and 52% female; ## Annotation annotating for the transcription text, speaker identification and gender ## Device Android mobile phone, iPhone; ## Language Spanish; ## Application scenarios speech recognition; voiceprint recognition; ## Accuracy rate the word accuracy rate is not less than 98% # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/Mexican_Spanish_Conversational_Speech_Data_by_Mobile_Phone", "## Description\n107 Hours - Mexican Spanish Conversational Speech Data by Mobile Phone involved 126 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz 16bit, uncompressed wav, mono channel;", "## Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n126 speakers totally, with 48% male and 52% female;", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nAndroid mobile phone, iPhone;", "## Language\nSpanish;", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #language-Spanish #region-us \n", "# Dataset Card for Nexdata/Mexican_Spanish_Conversational_Speech_Data_by_Mobile_Phone", "## Description\n107 Hours - Mexican Spanish Conversational Speech Data by Mobile Phone involved 126 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz 16bit, uncompressed wav, mono channel;", "## Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n126 speakers totally, with 48% male and 52% female;", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nAndroid mobile phone, iPhone;", "## Language\nSpanish;", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ 27, 31, 143, 3, 18, 14, 30, 18, 17, 8, 4, 11, 16, 9 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #language-Spanish #region-us \n# Dataset Card for Nexdata/Mexican_Spanish_Conversational_Speech_Data_by_Mobile_Phone## Description\n107 Hours - Mexican Spanish Conversational Speech Data by Mobile Phone involved 126 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 16kHz, 16bit, uncompressed WAV, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL# Specifications## Format\n16kHz 16bit, uncompressed wav, mono channel;## Environment\nquiet indoor environment, without echo;## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;## Demographics\n126 speakers totally, with 48% male and 52% female;## Annotation\nannotating for the transcription text, speaker identification and gender## Device\nAndroid mobile phone, iPhone;## Language\nSpanish;## Application scenarios\nspeech recognition; voiceprint recognition;## Accuracy rate\nthe word accuracy rate is not less than 98%# Licensing Information\nCommercial License" ]
b08dde2bb6d874cb8a00e6cc803641c9e816da6e
# Dataset Card for Nexdata/Brazilian_Portuguese_Conversational_Speech_Data_by_Telephone ## Description 104 Hours - Brazilian Portuguese Conversational Speech Data by Telephone involved 118 native speakers, developed with a proper balance of gender ratio. Speakers would choose a few familiar topics out of the given list and start conversations to ensure the dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, u-law pcm, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification. For more details, please refer to the link: https://www.nexdata.ai/datasets/1313?source=Huggingface # Specifications ## Format 8kHz 8bit, u-law pcm, mono channel; ## Environment quiet indoor environment, without echo; ## Recording content dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed; ## Demographics 118 speakers in total, with 54% males and 46% females. ## Annotation annotating for the transcription text, speaker identification and gender ## Device Telephony recording system; ## Language Portuguese; ## Application scenarios speech recognition; voiceprint recognition; ## Accuracy rate the word accuracy rate is not less than 98% # Licensing Information Commercial License
Nexdata/Brazilian_Portuguese_Conversational_Speech_Data_by_Telephone
[ "task_categories:automatic-speech-recognition", "language:pt", "region:us" ]
2023-11-07T09:09:40+00:00
{"language": ["pt"], "task_categories": ["automatic-speech-recognition"]}
2024-01-26T08:54:53+00:00
[]
[ "pt" ]
TAGS #task_categories-automatic-speech-recognition #language-Portuguese #region-us
# Dataset Card for Nexdata/Brazilian_Portuguese_Conversational_Speech_Data_by_Telephone ## Description 104 Hours - Brazilian Portuguese Conversational Speech Data by Telephone involved 118 native speakers, developed with a proper balance of gender ratio. Speakers would choose a few familiar topics out of the given list and start conversations to ensure the dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, u-law pcm, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification. For more details, please refer to the link: URL # Specifications ## Format 8kHz 8bit, u-law pcm, mono channel; ## Environment quiet indoor environment, without echo; ## Recording content dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed; ## Demographics 118 speakers in total, with 54% males and 46% females. ## Annotation annotating for the transcription text, speaker identification and gender ## Device Telephony recording system; ## Language Portuguese; ## Application scenarios speech recognition; voiceprint recognition; ## Accuracy rate the word accuracy rate is not less than 98% # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/Brazilian_Portuguese_Conversational_Speech_Data_by_Telephone", "## Description\n104 Hours - Brazilian Portuguese Conversational Speech Data by Telephone involved 118 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, u-law pcm, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n8kHz 8bit, u-law pcm, mono channel;", "## Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n118 speakers totally, with 54% males and 46% females.", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nTelephony recording system;", "## Language\nPortuguese;", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #language-Portuguese #region-us \n", "# Dataset Card for Nexdata/Brazilian_Portuguese_Conversational_Speech_Data_by_Telephone", "## Description\n104 Hours - Brazilian Portuguese Conversational Speech Data by Telephone involved 118 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, u-law pcm, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n8kHz 8bit, u-law pcm, mono channel;", "## Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n118 speakers totally, with 54% males and 46% females.", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nTelephony recording system;", "## Language\nPortuguese;", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ 28, 31, 145, 3, 17, 14, 30, 20, 17, 8, 6, 11, 16, 9 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #language-Portuguese #region-us \n# Dataset Card for Nexdata/Brazilian_Portuguese_Conversational_Speech_Data_by_Telephone## Description\n104 Hours - Brazilian Portuguese Conversational Speech Data by Telephone involved 118 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, u-law pcm, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL# Specifications## Format\n8kHz 8bit, u-law pcm, mono channel;## Environment\nquiet indoor environment, without echo;## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;## Demographics\n118 speakers totally, with 54% males and 46% females.## Annotation\nannotating for the transcription text, speaker identification and gender## Device\nTelephony recording system;## Language\nPortuguese;## Application scenarios\nspeech recognition; voiceprint recognition;## Accuracy rate\nthe word accuracy rate is not less than 98%# Licensing Information\nCommercial License" ]
2567e3b67634b04941d46d9295273c951ec2859f
# Dataset Card for Nexdata/Russian_Children_Spontaneous_Speech_Data ## Description The 163 Hours - Russian Child's Spontaneous Speech Data is a collection of speech clips, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attributes are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research. For more details, please refer to the link: https://www.nexdata.ai/datasets/1308?source=Huggingface # Specifications ## Format 16kHz, 16bit, mono channel; ## Age children aged 12 and under; ## Content category including interview, self-media, variety show, etc. ## Language Russian; ## Annotation annotation for the transcription text, speaker identification, gender; ## Application scenarios video caption generation and video content review; ## Accuracy at a Word Accuracy Rate (WAR) of no less than 98% # Licensing Information Commercial License
Nexdata/Russian_Children_Spontaneous_Speech_Data
[ "task_categories:automatic-speech-recognition", "language:ru", "region:us" ]
2023-11-07T09:11:18+00:00
{"language": ["ru"], "task_categories": ["automatic-speech-recognition"]}
2023-11-10T07:30:34+00:00
[]
[ "ru" ]
TAGS #task_categories-automatic-speech-recognition #language-Russian #region-us
# Dataset Card for Nexdata/Russian_Children_Spontaneous_Speech_Data ## Description The 163 Hours - Russian Child's Spontaneous Speech Data is a collection of speech clips, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attributes are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research. For more details, please refer to the link: URL # Specifications ## Format 16kHz, 16bit, mono channel; ## Age children aged 12 and under; ## Content category including interview, self-media, variety show, etc. ## Language Russian; ## Annotation annotation for the transcription text, speaker identification, gender; ## Application scenarios video caption generation and video content review; ## Accuracy at a Word Accuracy Rate (WAR) of no less than 98% # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/Russian_Children_Spontaneous_Speech_Data", "## Description\nThe 163 Hours - Russian Child's Spontaneous Speech Data is a collection of speech clips, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, mono channel;", "## Age\nchildren aged 12 and under;", "## Content category\nincluding interview, self-meida,variety show, etc.", "## Language\nRussian;", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Application scenarios\nvideo caption generation and video content review;", "## Accuracy\nat a Word Accuracy Rate (SAR) of being no less than 98%", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #language-Russian #region-us \n", "# Dataset Card for Nexdata/Russian_Children_Spontaneous_Speech_Data", "## Description\nThe 163 Hours - Russian Child's Spontaneous Speech Data is a collection of speech clips, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16kHz, 16bit, mono channel;", "## Age\nchildren aged 12 and under;", "## Content category\nincluding interview, self-meida,variety show, etc.", "## Language\nRussian;", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Application scenarios\nvideo caption generation and video content review;", "## Accuracy\nat a Word Accuracy Rate (SAR) of being no less than 98%", "# Licensing Information\nCommercial License" ]
[ 27, 26, 96, 3, 12, 9, 17, 4, 17, 12, 20, 9 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #language-Russian #region-us \n# Dataset Card for Nexdata/Russian_Children_Spontaneous_Speech_Data## Description\nThe 163 Hours - Russian Child's Spontaneous Speech Data is a collection of speech clips, the content covering multiple topics. All the speech audio was manually transcribed into text content; speaker identity, gender, and other attribution are also annotated. This dataset can be used for voiceprint recognition model training, corpus construction for machine translation, and algorithm research introduction\n\nFor more details, please refer to the link: URL# Specifications## Format\n16kHz, 16bit, mono channel;## Age\nchildren aged 12 and under;## Content category\nincluding interview, self-meida,variety show, etc.## Language\nRussian;## Annotation\nannotation for the transcription text, speaker identification, gender;## Application scenarios\nvideo caption generation and video content review;## Accuracy\nat a Word Accuracy Rate (SAR) of being no less than 98%# Licensing Information\nCommercial License" ]
ebf24fa5dec9c91104c390fe4eca769f09f49249
# Dataset Card for Nexdata/Thai_Children_Spontaneous_Speech_Data ## Description The 100 Hours - Thai Child's Spontaneous Speech Data, manually screened and processed. Annotation contains transcription text, speaker identification, gender and other information. This dataset can be applied in speech recognition (acoustic model or language model training), caption generation, voice content moderation and other AI algorithm research. For more details, please refer to the link: https://www.nexdata.ai/datasets/1330?source=Huggingface # Specifications ## Format 16 kHz, 16 bit, wav, mono channel; ## Age 12 years old and younger children; ## Content category including self-media, conversation, live, lecture, variety show; ## Language Thai; ## Annotation annotation for the transcription text, speaker identification, gender; ## Accuracy Word Accuracy Rate (WAR) at least 98%. # Licensing Information Commercial License
Nexdata/Thai_Children_Spontaneous_Speech_Data
[ "task_categories:automatic-speech-recognition", "language:th", "region:us" ]
2023-11-07T09:12:46+00:00
{"language": ["th"], "task_categories": ["automatic-speech-recognition"]}
2023-11-10T07:30:53+00:00
[]
[ "th" ]
TAGS #task_categories-automatic-speech-recognition #language-Thai #region-us
# Dataset Card for Nexdata/Thai_Children_Spontaneous_Speech_Data ## Description The 100 Hours - Thai Child's Spontaneous Speech Data, manually screened and processed. Annotation contains transcription text, speaker identification, gender and other information. This dataset can be applied in speech recognition (acoustic model or language model training), caption generation, voice content moderation and other AI algorithm research. For more details, please refer to the link: URL # Specifications ## Format 16 kHz, 16 bit, wav, mono channel; ## Age 12 years old and younger children; ## Content category including self-media, conversation, live, lecture, variety show; ## Language Thai; ## Annotation annotation for the transcription text, speaker identification, gender; ## Accuracy Word Accuracy Rate (WAR) at least 98%. # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/Thai_Children_Spontaneous_Speech_Data", "## Description\nThe 100 Hours - Thai Child's Spontaneous Speech Data, manually screened and processed. Annotation contains transcription text, speaker identification, gender and other informantion. This dataset can be applied in speech recognition (acoustic model or language model training), caption generation, voice content moderation and other AI algorithm research.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16k Hz, 16 bit, wav, mono channel;", "## Age\n12 years old and younger children;", "## Content category\nincluding self-media, conversation, live, lecture, variety show;", "## Language\nThai;", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Accuracy\nWord Accuracy Rate (WAR) at least 98%.", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #language-Thai #region-us \n", "# Dataset Card for Nexdata/Thai_Children_Spontaneous_Speech_Data", "## Description\nThe 100 Hours - Thai Child's Spontaneous Speech Data, manually screened and processed. Annotation contains transcription text, speaker identification, gender and other informantion. This dataset can be applied in speech recognition (acoustic model or language model training), caption generation, voice content moderation and other AI algorithm research.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16k Hz, 16 bit, wav, mono channel;", "## Age\n12 years old and younger children;", "## Content category\nincluding self-media, conversation, live, lecture, variety show;", "## Language\nThai;", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Accuracy\nWord Accuracy Rate (WAR) at least 98%.", "# Licensing Information\nCommercial License" ]
[ 27, 26, 87, 3, 15, 10, 17, 4, 17, 16, 9 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #language-Thai #region-us \n# Dataset Card for Nexdata/Thai_Children_Spontaneous_Speech_Data## Description\nThe 100 Hours - Thai Child's Spontaneous Speech Data, manually screened and processed. Annotation contains transcription text, speaker identification, gender and other informantion. This dataset can be applied in speech recognition (acoustic model or language model training), caption generation, voice content moderation and other AI algorithm research.\n\nFor more details, please refer to the link: URL# Specifications## Format\n16k Hz, 16 bit, wav, mono channel;## Age\n12 years old and younger children;## Content category\nincluding self-media, conversation, live, lecture, variety show;## Language\nThai;## Annotation\nannotation for the transcription text, speaker identification, gender;## Accuracy\nWord Accuracy Rate (WAR) at least 98%.# Licensing Information\nCommercial License" ]
5cb3eb6b8e74a730df3c7e5e0421d3f372311aeb
# Dataset Card for Nexdata/Indonesian_Conversational_Speech_Data_by_Telephone ## Description The 89 Hours - Indonesian conversational speech data collected by telephone involved 124 native speakers, developed with a balanced gender ratio. Speakers would choose a few familiar topics out of the given list and start conversations to ensure the dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, u-law pcm, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification. For more details, please refer to the link: https://www.nexdata.ai/datasets/1311?source=Huggingface # Specifications ## Format 8kHz, 8bit, u-law pcm, mono channel; ## Environment quiet indoor environment, without echo; ## Recording content dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed; ## Demographics 140 speakers in total, with 54% male and 46% female; ## Annotation annotation for the transcription text, speaker identification, and gender; ## Device Android mobile phone, iPhone; ## Language Indonesian; ## Application scenarios speech recognition; voiceprint recognition; ## Accuracy rate the word accuracy rate is not less than 98% # Licensing Information Commercial License
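As a minimal sketch of reading a clip in this format (assuming the python-soundfile package; the filename below is a placeholder, not part of this dataset's actual layout):

```
# Hedged example: load one 8 kHz, 8-bit u-law mono WAV clip with soundfile.
# "example_clip.wav" is a hypothetical filename used only for illustration.
import soundfile as sf

audio, sample_rate = sf.read("example_clip.wav")  # u-law is decoded to float64
assert sample_rate == 8000  # matches the 8kHz spec above
print(audio.shape, audio.dtype)
```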
Nexdata/Indonesian_Conversational_Speech_Data_by_Telephone
[ "task_categories:automatic-speech-recognition", "language:id", "region:us" ]
2023-11-07T09:13:56+00:00
{"language": ["id"], "task_categories": ["automatic-speech-recognition"]}
2023-11-10T07:30:21+00:00
[]
[ "id" ]
TAGS #task_categories-automatic-speech-recognition #language-Indonesian #region-us
# Dataset Card for Nexdata/Indonesian_Conversational_Speech_Data_by_Telephone ## Description The 89 Hours - Indonesian conversational speech data collected by telephone involved 124 native speakers, developed with a balanced gender ratio. Speakers would choose a few familiar topics out of the given list and start conversations to ensure the dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, u-law pcm, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification. For more details, please refer to the link: URL # Specifications ## Format 8kHz, 8bit, u-law pcm, mono channel; ## Environment quiet indoor environment, without echo; ## Recording content dozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed; ## Demographics 140 speakers in total, with 54% male and 46% female; ## Annotation annotation for the transcription text, speaker identification, and gender; ## Device Android mobile phone, iPhone; ## Language Indonesian; ## Application scenarios speech recognition; voiceprint recognition; ## Accuracy rate the word accuracy rate is not less than 98% # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/Indonesian_Conversational_Speech_Data_by_Telephone", "## Description\nThe 89 Hours - Indonesian conversational speech data collected by Telephone involved 124 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, u-law pcm, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n8kHz 8bit, u-law pcm, mono channel;", "## Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n140 speakers totally, with 54% male and 46% female", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nAndroid mobile phone, iPhone;", "## Language\nIndonesian;", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #language-Indonesian #region-us \n", "# Dataset Card for Nexdata/Indonesian_Conversational_Speech_Data_by_Telephone", "## Description\nThe 89 Hours - Indonesian conversational speech data collected by Telephone involved 124 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, u-law pcm, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n8kHz 8bit, u-law pcm, mono channel;", "## Environment\nquiet indoor environment, without echo;", "## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;", "## Demographics\n140 speakers totally, with 54% male and 46% female", "## Annotation\nannotating for the transcription text, speaker identification and gender", "## Device\nAndroid mobile phone, iPhone;", "## Language\nIndonesian;", "## Application scenarios\nspeech recognition; voiceprint recognition;", "## Accuracy rate\nthe word accuracy rate is not less than 98%", "# Licensing Information\nCommercial License" ]
[ 27, 26, 144, 3, 17, 14, 30, 17, 17, 8, 5, 11, 16, 9 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #language-Indonesian #region-us \n# Dataset Card for Nexdata/Indonesian_Conversational_Speech_Data_by_Telephone## Description\nThe 89 Hours - Indonesian conversational speech data collected by Telephone involved 124 native speakers, developed with proper balance of gender ratio, Speakers would choose a few familiar topics out of the given list and start conversations to ensure dialogues' fluency and naturalness. The recording devices are various mobile phones. The audio format is 8kHz, 8bit, u-law pcm, and all the speech data was recorded in quiet indoor environments. All the speech audio was manually transcribed with text content, the start and end time of each effective sentence, and speaker identification.\n\nFor more details, please refer to the link: URL# Specifications## Format\n8kHz 8bit, u-law pcm, mono channel;## Environment\nquiet indoor environment, without echo;## Recording content\ndozens of topics are specified, and the speakers make dialogue under those topics while the recording is performed;## Demographics\n140 speakers totally, with 54% male and 46% female## Annotation\nannotating for the transcription text, speaker identification and gender## Device\nAndroid mobile phone, iPhone;## Language\nIndonesian;## Application scenarios\nspeech recognition; voiceprint recognition;## Accuracy rate\nthe word accuracy rate is not less than 98%# Licensing Information\nCommercial License" ]
d606b9e626da414af965df5978c4ed0fbd6ad668
# Dataset Card for Nexdata/Indonesian_Children_Spontaneous_Speech_Data ## Description The 100 Hours - Indonesian Child's Spontaneous Speech Data was manually screened and processed. Annotation contains transcription text, speaker identification, gender, and other information. This dataset can be applied in speech recognition (acoustic model or language model training), caption generation, voice content moderation, and other AI algorithm research. For more details, please refer to the link: https://www.nexdata.ai/datasets/1332?source=Huggingface # Specifications ## Format 16kHz, 16bit, wav, mono channel; ## Age children aged 12 and younger; ## Content category including self-media, conversation, live, lecture, variety show; ## Language Indonesian; ## Annotation annotation for the transcription text, speaker identification, gender; ## Accuracy Word Accuracy Rate (WAR) of at least 98%. # Licensing Information Commercial License
Nexdata/Indonesian_Children_Spontaneous_Speech_Data
[ "task_categories:automatic-speech-recognition", "language:id", "region:us" ]
2023-11-07T09:15:27+00:00
{"language": ["id"], "task_categories": ["automatic-speech-recognition"]}
2023-11-10T07:29:54+00:00
[]
[ "id" ]
TAGS #task_categories-automatic-speech-recognition #language-Indonesian #region-us
# Dataset Card for Nexdata/Indonesian_Children_Spontaneous_Speech_Data ## Description The 100 Hours - Indonesian Child's Spontaneous Speech Data was manually screened and processed. Annotation contains transcription text, speaker identification, gender, and other information. This dataset can be applied in speech recognition (acoustic model or language model training), caption generation, voice content moderation, and other AI algorithm research. For more details, please refer to the link: URL # Specifications ## Format 16kHz, 16bit, wav, mono channel; ## Age children aged 12 and younger; ## Content category including self-media, conversation, live, lecture, variety show; ## Language Indonesian; ## Annotation annotation for the transcription text, speaker identification, gender; ## Accuracy Word Accuracy Rate (WAR) of at least 98%. # Licensing Information Commercial License
[ "# Dataset Card for Nexdata/Indonesian_Children_Spontaneous_Speech_Data", "## Description\nThe 100 Hours - Indonesian Child's Spontaneous Speech Data, manually screened and processed. Annotation contains transcription text, speaker identification, gender and other informantion. This dataset can be applied in speech recognition (acoustic model or language model training), caption generation, voice content moderation and other AI algorithm research.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16k Hz, 16 bit, wav, mono channel;", "## Age\n12 years old and younger children;", "## Content category\nincluding self-media, conversation, live, lecture, variety show;", "## Language\nIndonesian", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Accuracy\nWord Accuracy Rate (WAR) at least 98%.", "# Licensing Information\nCommercial License" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #language-Indonesian #region-us \n", "# Dataset Card for Nexdata/Indonesian_Children_Spontaneous_Speech_Data", "## Description\nThe 100 Hours - Indonesian Child's Spontaneous Speech Data, manually screened and processed. Annotation contains transcription text, speaker identification, gender and other informantion. This dataset can be applied in speech recognition (acoustic model or language model training), caption generation, voice content moderation and other AI algorithm research.\n\nFor more details, please refer to the link: URL", "# Specifications", "## Format\n16k Hz, 16 bit, wav, mono channel;", "## Age\n12 years old and younger children;", "## Content category\nincluding self-media, conversation, live, lecture, variety show;", "## Language\nIndonesian", "## Annotation\nannotation for the transcription text, speaker identification, gender;", "## Accuracy\nWord Accuracy Rate (WAR) at least 98%.", "# Licensing Information\nCommercial License" ]
[ 27, 26, 88, 3, 15, 10, 17, 4, 17, 16, 9 ]
[ "passage: TAGS\n#task_categories-automatic-speech-recognition #language-Indonesian #region-us \n# Dataset Card for Nexdata/Indonesian_Children_Spontaneous_Speech_Data## Description\nThe 100 Hours - Indonesian Child's Spontaneous Speech Data, manually screened and processed. Annotation contains transcription text, speaker identification, gender and other informantion. This dataset can be applied in speech recognition (acoustic model or language model training), caption generation, voice content moderation and other AI algorithm research.\n\nFor more details, please refer to the link: URL# Specifications## Format\n16k Hz, 16 bit, wav, mono channel;## Age\n12 years old and younger children;## Content category\nincluding self-media, conversation, live, lecture, variety show;## Language\nIndonesian## Annotation\nannotation for the transcription text, speaker identification, gender;## Accuracy\nWord Accuracy Rate (WAR) at least 98%.# Licensing Information\nCommercial License" ]
3f899582af72c419bffe3026d5ba93bbc442133c
# Dataset Card for "cowc-m" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
danielz01/cowc-m
[ "region:us" ]
2023-11-07T09:27:19+00:00
{"dataset_info": [{"config_name": "Columbus", "features": [{"name": "Folder_Name", "dtype": "string"}, {"name": "File_Name", "dtype": "string"}, {"name": "Neg_Count", "dtype": "int64"}, {"name": "Other_Count", "dtype": "int64"}, {"name": "Pickup_Count", "dtype": "int64"}, {"name": "Sedan_Count", "dtype": "int64"}, {"name": "Unknown_Count", "dtype": "int64"}, {"name": "image", "dtype": "image"}, {"name": "city", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "objects", "struct": [{"name": "bbox", "sequence": {"sequence": "int64"}}, {"name": "categories", "sequence": "string"}]}], "splits": [{"name": "train", "num_bytes": 36823834.0, "num_examples": 396}], "download_size": 36567855, "dataset_size": 36823834.0}, {"config_name": "Potsdam", "features": [{"name": "Folder_Name", "dtype": "string"}, {"name": "File_Name", "dtype": "string"}, {"name": "Neg_Count", "dtype": "int64"}, {"name": "Other_Count", "dtype": "int64"}, {"name": "Pickup_Count", "dtype": "int64"}, {"name": "Sedan_Count", "dtype": "int64"}, {"name": "Unknown_Count", "dtype": "int64"}, {"name": "image", "dtype": "image"}, {"name": "city", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "objects", "struct": [{"name": "bbox", "sequence": {"sequence": "int64"}}, {"name": "categories", "sequence": "string"}]}], "splits": [{"name": "train", "num_bytes": 56945092.0, "num_examples": 504}], "download_size": 56738231, "dataset_size": 56945092.0}, {"config_name": "Selwyn", "features": [{"name": "Folder_Name", "dtype": "string"}, {"name": "File_Name", "dtype": "string"}, {"name": "Neg_Count", "dtype": "int64"}, {"name": "Other_Count", "dtype": "int64"}, {"name": "Pickup_Count", "dtype": "int64"}, {"name": "Sedan_Count", "dtype": "int64"}, {"name": "Unknown_Count", "dtype": "int64"}, {"name": "image", "dtype": "image"}, {"name": "city", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "objects", "struct": [{"name": "bbox", "sequence": {"sequence": "int64"}}, {"name": "categories", "sequence": "string"}]}], "splits": [{"name": "train", "num_bytes": 144665577.994, "num_examples": 1354}], "download_size": 145421967, "dataset_size": 144665577.994}, {"config_name": "Toronto", "features": [{"name": "Folder_Name", "dtype": "string"}, {"name": "File_Name", "dtype": "string"}, {"name": "Neg_Count", "dtype": "int64"}, {"name": "Other_Count", "dtype": "int64"}, {"name": "Pickup_Count", "dtype": "int64"}, {"name": "Sedan_Count", "dtype": "int64"}, {"name": "Unknown_Count", "dtype": "int64"}, {"name": "image", "dtype": "image"}, {"name": "city", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "objects", "struct": [{"name": "bbox", "sequence": {"sequence": "int64"}}, {"name": "categories", "sequence": "string"}]}], "splits": [{"name": "train", "num_bytes": 338228293.021, "num_examples": 3173}], "download_size": 339771087, "dataset_size": 338228293.021}, {"config_name": "Utah", "features": [{"name": "Folder_Name", "dtype": "string"}, {"name": "File_Name", "dtype": "string"}, {"name": "Neg_Count", "dtype": "int64"}, {"name": "Other_Count", "dtype": "int64"}, {"name": "Pickup_Count", "dtype": "int64"}, {"name": "Sedan_Count", "dtype": "int64"}, {"name": "Unknown_Count", "dtype": "int64"}, {"name": "image", "dtype": "image"}, {"name": "city", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "objects", "struct": [{"name": "bbox", "sequence": {"sequence": "int64"}}, {"name": "categories", "sequence": "string"}]}], "splits": [{"name": "train", "num_bytes": 
539966912.021, "num_examples": 5187}], "download_size": 517998298, "dataset_size": 539966912.021}, {"config_name": "Vaihingen", "features": [{"name": "Folder_Name", "dtype": "string"}, {"name": "File_Name", "dtype": "string"}, {"name": "Neg_Count", "dtype": "int64"}, {"name": "Other_Count", "dtype": "int64"}, {"name": "Pickup_Count", "dtype": "int64"}, {"name": "Sedan_Count", "dtype": "int64"}, {"name": "Unknown_Count", "dtype": "int64"}, {"name": "image", "dtype": "image"}, {"name": "city", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "objects", "struct": [{"name": "bbox", "sequence": {"sequence": "int64"}}, {"name": "categories", "sequence": "string"}]}], "splits": [{"name": "train", "num_bytes": 147781754.194, "num_examples": 1331}], "download_size": 145970413, "dataset_size": 147781754.194}], "configs": [{"config_name": "Columbus", "data_files": [{"split": "train", "path": "Columbus/train-*"}]}, {"config_name": "Potsdam", "data_files": [{"split": "train", "path": "Potsdam/train-*"}]}, {"config_name": "Selwyn", "data_files": [{"split": "train", "path": "Selwyn/train-*"}]}, {"config_name": "Toronto", "data_files": [{"split": "train", "path": "Toronto/train-*"}]}, {"config_name": "Utah", "data_files": [{"split": "train", "path": "Utah/train-*"}]}, {"config_name": "Vaihingen", "data_files": [{"split": "train", "path": "Vaihingen/train-*"}]}]}
2023-11-07T09:29:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cowc-m" More Information needed
[ "# Dataset Card for \"cowc-m\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cowc-m\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cowc-m\"\n\nMore Information needed" ]
98ba0004120f2b16fb786063c6e0209a9210d58a
# Dataset Card for "DocVQA_layoutLM_large" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Sharka/DocVQA_for_LayoutLM
[ "region:us" ]
2023-11-07T09:29:08+00:00
{"dataset_info": {"features": [{"name": "image", "sequence": {"sequence": {"sequence": "uint8"}}}, {"name": "input_ids", "sequence": "int32"}, {"name": "token_type_ids", "sequence": "int8"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "bbox", "sequence": {"sequence": "int64"}}, {"name": "start_positions", "dtype": "int64"}, {"name": "end_positions", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 6671593632, "num_examples": 38174}, {"name": "validation", "num_bytes": 882054096, "num_examples": 5047}], "download_size": 2456688624, "dataset_size": 7553647728}}
2023-11-07T09:37:35+00:00
[]
[]
TAGS #region-us
# Dataset Card for "DocVQA_layoutLM_large" More Information needed
[ "# Dataset Card for \"DocVQA_layoutLM_large\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"DocVQA_layoutLM_large\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"DocVQA_layoutLM_large\"\n\nMore Information needed" ]
c30d7018342e856e1c73f27abc7f8dd42845afe4
# Dataset Card for "coastal3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
peldrak/coastal3
[ "region:us" ]
2023-11-07T09:57:32+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "pixel_values", "dtype": "image"}, {"name": "label", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 442266694.208, "num_examples": 1296}, {"name": "test", "num_bytes": 147937358.0, "num_examples": 370}], "download_size": 611506244, "dataset_size": 590204052.208}}
2023-11-07T12:21:48+00:00
[]
[]
TAGS #region-us
# Dataset Card for "coastal3" More Information needed
[ "# Dataset Card for \"coastal3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"coastal3\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"coastal3\"\n\nMore Information needed" ]
dfe87f10d92aaf03bf034d07f06d15739ed0818d
# Farfetch web scraped data\ \ ## About the website\ \ Farfetch is a British-Portuguese online luxury fashion retail platform **founded in 2007** by Portuguese entrepreneur José Neves. With its head office located in London, UK, the company also operates from offices in Portugal, Los Angeles, Tokyo, Shanghai and Brazil among others. Farfetch operates a unique business model, in which it doesn't hold any inventory but acts as a marketplace that connects buyers and sellers globally. The company's sales come mainly from commissions on the sales of third-party vendors. **Farfetch went public on the New York Stock Exchange** (symbol: FTCH) on September 21st, 2018, raising approximately $885 million and achieving a market cap of nearly $6.2 billion at the IPO. More information about Farfetch can be found on its [Wikipedia](https://en.wikipedia.org/wiki/Farfetch) page and [Bloomberg](https://www.bloomberg.com/profile/company/FTCH:US) page.\ \ Some of **Farfetch's main competitors** in the fashion e-commerce market include Net-A-Porter, MatchesFashion, and SSENSE. [Net-A-Porter](https://www.net-a-porter.com/) is a premier online luxury fashion destination founded by Natalie Massenet in 2000. The company operates on a similar business model to Farfetch, but in addition to selling designer clothes and accessories, it also offers content such as fashion news and trends. MatchesFashion, a UK-based company like Farfetch, is a global luxury-shopping site that blends online shopping with retail boutiques. [SSENSE](https://www.ssense.com/), on the other hand, is a Canadian luxury retailer that offers a curated selection of over 500 emerging designers and established brands. \ \ One of the benefits of **web scraping for a company like Farfetch** is to use this factual data to make more informed business decisions. For example, data scrapers can be used to scrape pricing and discount information from competitor websites to assess their pricing strategies and make adjustments accordingly. They can also use the data to track price fluctuations over time, find out when competitors are offering sales and discounts, and reduce their prices or run promotions in order to stay competitive. Web scraped data could also help them conduct market research, where they can analyze what kinds of products are popular, what brands people are buying most, and what the current fashion trends are, among other things. This data could help them make decisions on what products to add to their platform, how to price them, how much inventory to maintain and at what times, etc. Overall, web scraped data can enable businesses to make data-driven decisions, stay competitive, and increase sales and revenue.\ \ \ ## Link to **dataset**\ \ [Farfetch dataset](https://www.databoutique.com/buy-data-list-subset/Farfetch%20web%20scraped%20data/r/recnDOxe6YYa3C7Ib)
PigiVinciDBQ/Farfetch
[ "region:us" ]
2023-11-07T10:08:30+00:00
{}
2023-11-07T10:10:48+00:00
[]
[]
TAGS #region-us
# Farfetch web scraped data\ \ ## About the website\ \ Farfetch is a British-Portuguese online luxury fashion retail platform founded in 2007 by Portuguese entrepreneur José Neves. With its head office located in London, UK, the company also operates from offices in Portugal, Los Angeles, Tokyo, Shanghai and Brazil among others. Farfetch operates a unique business model, in which it doesn't hold any inventory but acts as a marketplace that connects buyers and sellers globally. The company's sales come mainly from commissions on the sales of third-party vendors. Farfetch went public on the New York Stock Exchange (symbol: FTCH) on September 21st, 2018, raising approximately $885 million and achieving a market cap of nearly $6.2 billion at the IPO. More information about Farfetch can be found on its Wikipedia page and Bloomberg page.\ \ Some of Farfetch's main competitors in the fashion e-commerce market include Net-A-Porter, MatchesFashion, and SSENSE. Net-A-Porter is a premier online luxury fashion destination founded by Natalie Massenet in 2000. The company operates on a similar business model to Farfetch, but in addition to selling designer clothes and accessories, it also offers content such as fashion news and trends. MatchesFashion, a UK-based company like Farfetch, is a global luxury-shopping site that blends online shopping with retail boutiques. SSENSE, on the other hand, is a Canadian luxury retailer that offers a curated selection of over 500 emerging designers and established brands. \ \ One of the benefits of web scraping for a company like Farfetch is to use this factual data to make more informed business decisions. For example, data scrapers can be used to scrape pricing and discount information from competitor websites to assess their pricing strategies and make adjustments accordingly. They can also use the data to track price fluctuations over time, find out when competitors are offering sales and discounts, and reduce their prices or run promotions in order to stay competitive. Web scraped data could also help them conduct market research, where they can analyze what kinds of products are popular, what brands people are buying most, and what the current fashion trends are, among other things. This data could help them make decisions on what products to add to their platform, how to price them, how much inventory to maintain and at what times, etc. Overall, web scraped data can enable businesses to make data-driven decisions, stay competitive, and increase sales and revenue.\ \ \ ## Link to dataset\ \ Farfetch dataset
[ "# Farfetch web scraped data\\\n\\\n ## About the website\\\n\\\n Farfetch is a British-Portuguese online luxury fashion retail platform founded in 2007 by Portuguese entrepreneur José Neves. With its head office located in London, UK, the company also operates from offices in Portugal, Los Angeles, Tokyo, Shanghai and Brazil among others. Farfetch operates a unique business model, in which it doesn\t hold any inventory but acts as a marketplace that connects buyers and sellers globally. The companys sales come mainly from commissions on the sales of third-party vendors. Farfetch went public on the New York Stock Exchange (symbol: FTCH) on September 21st, 2018, raising approximately $885 million and achieving a market cap of nearly $6.2 billion at the IPO. More information about Farfetch can be found on its Wikipedia page and Bloomberg page.\\\n\\\nSome of Farfetchs main competitors in the fashion e-commerce market include Net-A-Porter, MatchesFashion, and SSENSE. Net-A-Porter is a premier online luxury fashion destination founded by Natalie Massenet in 2000. The company operates on a similar business model as Farfetch, but in addition to selling designer clothes and accessories, it also offers content such as fashion news and trends. MatchesFashion, a UK based company like Farfetch, is a global luxury-shopping site that blends online shopping with retail boutiques. SSENSE, on the other hand, is a Canadian luxury retailer that offers a curated selection of over 500 emerging designers and established brands. \\\n\\\nOne of the benefits of web scraping for a company like Farfetch is to use this factual data to make more informed business decisions. For example, data scrapers can be used to scrape pricing and discount information from competitor websites to assess their pricing strategies and make adjustments accordingly. They can also use the data to track price fluctuations over time, find out when competitors are offering sales and discounts, and reduce their prices or run promotions in order to stay competitive. Web scraped data could also help them conduct market research, where they can analyze what kind of products are popular, what brands people are buying most, what are the current fashion trends, among other things. This data could help them make decisions on what products to add to their platform, how to price them, how much inventory to maintain and at what times, etc. Overall, web scraped data can enable businesses to make data-driven decisions, stay competitive, and increase sales and revenue.\\\n\\\n\\\n ## Link to dataset\\\n\\\n Farfetch dataset" ]
[ "TAGS\n#region-us \n", "# Farfetch web scraped data\\\n\\\n ## About the website\\\n\\\n Farfetch is a British-Portuguese online luxury fashion retail platform founded in 2007 by Portuguese entrepreneur José Neves. With its head office located in London, UK, the company also operates from offices in Portugal, Los Angeles, Tokyo, Shanghai and Brazil among others. Farfetch operates a unique business model, in which it doesn\t hold any inventory but acts as a marketplace that connects buyers and sellers globally. The companys sales come mainly from commissions on the sales of third-party vendors. Farfetch went public on the New York Stock Exchange (symbol: FTCH) on September 21st, 2018, raising approximately $885 million and achieving a market cap of nearly $6.2 billion at the IPO. More information about Farfetch can be found on its Wikipedia page and Bloomberg page.\\\n\\\nSome of Farfetchs main competitors in the fashion e-commerce market include Net-A-Porter, MatchesFashion, and SSENSE. Net-A-Porter is a premier online luxury fashion destination founded by Natalie Massenet in 2000. The company operates on a similar business model as Farfetch, but in addition to selling designer clothes and accessories, it also offers content such as fashion news and trends. MatchesFashion, a UK based company like Farfetch, is a global luxury-shopping site that blends online shopping with retail boutiques. SSENSE, on the other hand, is a Canadian luxury retailer that offers a curated selection of over 500 emerging designers and established brands. \\\n\\\nOne of the benefits of web scraping for a company like Farfetch is to use this factual data to make more informed business decisions. For example, data scrapers can be used to scrape pricing and discount information from competitor websites to assess their pricing strategies and make adjustments accordingly. They can also use the data to track price fluctuations over time, find out when competitors are offering sales and discounts, and reduce their prices or run promotions in order to stay competitive. Web scraped data could also help them conduct market research, where they can analyze what kind of products are popular, what brands people are buying most, what are the current fashion trends, among other things. This data could help them make decisions on what products to add to their platform, how to price them, how much inventory to maintain and at what times, etc. Overall, web scraped data can enable businesses to make data-driven decisions, stay competitive, and increase sales and revenue.\\\n\\\n\\\n ## Link to dataset\\\n\\\n Farfetch dataset" ]
[ 6, 598 ]
[ "passage: TAGS\n#region-us \n" ]
58dcef83a63cccebacd3e786afd73181cc9175e5
# 🏟️ Long Code Arena (Commit Message Generation) This is the benchmark for the Commit Message Generation task, part of the 🏟️ [Long Code Arena benchmark](https://huggingface.co/spaces/JetBrains-Research/long-code-arena). The current version is a manually curated subset of the Python test set from the 🤗 [CommitChronicle dataset](https://huggingface.co/datasets/JetBrains-Research/commit-chronicle), tailored for larger commits. ## How-to 1. List all the available configs via [`datasets.get_dataset_config_names`](https://huggingface.co/docs/datasets/v2.14.3/en/package_reference/loading_methods#datasets.get_dataset_config_names) and choose an appropriate one. Current configs: `commitchronicle-py-long`, `commitchronicle-py-long-labels` 2. Load the data via [`load_dataset`](https://huggingface.co/docs/datasets/v2.14.3/en/package_reference/loading_methods#datasets.load_dataset): ``` from datasets import load_dataset configuration = "TODO" # select a configuration dataset = load_dataset("JetBrains-Research/lca-cmg", configuration, split="test") ``` Note that all the data we have is considered to be in the test split. **Note.** Working with git repositories under the [`repos`](https://huggingface.co/datasets/JetBrains-Research/lca-cmg/tree/main/repos) directory is not supported via 🤗 Datasets. Download and extract the contents of each repository. We provide a full list of files in [`paths.json`](https://huggingface.co/datasets/JetBrains-Research/lca-cmg/blob/main/paths.json). ## Dataset Structure This dataset contains three kinds of data: * *full data* about each commit (including modifications) * metadata with quality *labels* * compressed *git repositories* ### Full data This section concerns the configuration with *full data* about each commit (no `-labels` suffix). Each example has the following fields: | **Field** | **Description** | |:---------:|:-----------------------------------------:| | `repo` | Commit repository. | | `hash` | Commit hash. | | `date` | Commit date. | | `license` | Commit repository's license. | | `message` | Commit message. | | `mods` | List of file modifications from a commit. | Each file modification has the following fields: | **Field** | **Description** | |:-------------:|:-------------------------------------------------------------------------------------------------:| | `change_type` | Type of change to current file. One of: `ADD`, `COPY`, `RENAME`, `DELETE`, `MODIFY` or `UNKNOWN`. | | `old_path` | Path to file before change (might be empty). | | `new_path` | Path to file after change (might be empty). | | `diff` | `git diff` for current file. | Data point example: ``` {'hash': 'b76ed0db81b3123ede5dc5e5f1bddf36336f3722', 'repo': 'apache/libcloud', 'date': '05.03.2022 17:52:34', 'license': 'Apache License 2.0', 'message': 'Add tests which verify that all OpenStack driver can be instantiated\nwith all the supported auth versions.\nNOTE: Those tests will fail right now due to the regressions being\nintroduced recently which breaks auth for some versions.', 'mods': [{'change_type': 'MODIFY', 'new_path': 'libcloud/test/compute/test_openstack.py', 'old_path': 'libcloud/test/compute/test_openstack.py', 'diff': '@@ -39,6 +39,7 @@ from libcloud.utils.py3 import u\n<...>'}], } ``` ### Labels This section concerns the configuration with metadata and *labels* (with `-labels` suffix). Each example has the following fields: | **Field** | **Description** | |:---------:|:------------------------------------------------------------------:| | `repo` | Commit repository. | | `hash` | Commit hash. | | `date` | Commit date. | | `license` | Commit repository's license. | | `message` | Commit message. | | `label` | Label of current commit as a target for CMG task. | | `comment` | Comment for a label for current commit (optional, might be empty). | Labels are on a 1-5 scale, where: * 1 – strong no * 2 – weak no * 3 – unsure * 4 – weak yes * 5 – strong yes Data point example: ``` {'hash': '1559a4c686ddc2947fc3606e1c4279062cc9480f', 'repo': 'appscale/gts', 'date': '15.07.2018 21:00:39', 'license': 'Apache License 2.0', 'message': 'Add auto_id_policy and logs_path flags\n\nThese changes were introduced in the 1.7.5 SDK.', 'label': 1, 'comment': 'no way to know the version'} ``` ### Git Repositories This section concerns the [`repos`](https://huggingface.co/datasets/JetBrains-Research/lca-cmg/tree/main/repos) directory, which stores compressed Git repositories for all the commits in this benchmark. After you download and extract it, you can work with each repository either via Git or via Python libraries like [GitPython](https://github.com/gitpython-developers/GitPython) or [PyDriller](https://github.com/ishepard/pydriller).
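As a minimal sketch of that last step (assuming PyDriller 2.x and that you have extracted one archive locally — the path below is hypothetical; consult `paths.json` for the real layout), a repository's history can be traversed like this:

```
# Hedged example: iterate over the commits of one extracted repository.
# "repos/extracted/apache__libcloud" is an assumed local path, not a guaranteed one.
from pydriller import Repository

repo_path = "repos/extracted/apache__libcloud"  # hypothetical extracted location

for commit in Repository(repo_path).traverse_commits():
    subject = commit.msg.splitlines()[0] if commit.msg else ""
    print(commit.hash, subject)
    for mod in commit.modified_files:
        # change_type is a ModificationType enum (ADD, MODIFY, DELETE, ...)
        print("  ", mod.change_type.name, mod.new_path or mod.old_path)
```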
JetBrains-Research/lca-commit-message-generation
[ "region:us" ]
2023-11-07T10:09:18+00:00
{"dataset_info": [{"config_name": "commitchronicle-py-long", "features": [{"name": "hash", "dtype": "string"}, {"name": "repo", "dtype": "string"}, {"name": "date", "dtype": "string"}, {"name": "license", "dtype": "string"}, {"name": "message", "dtype": "string"}, {"name": "mods", "list": [{"name": "change_type", "dtype": "string"}, {"name": "old_path", "dtype": "string"}, {"name": "new_path", "dtype": "string"}, {"name": "diff", "dtype": "string"}]}], "splits": [{"name": "test", "num_examples": 163}]}, {"config_name": "commitchronicle-py-long-labels", "features": [{"name": "hash", "dtype": "string"}, {"name": "repo", "dtype": "string"}, {"name": "date", "dtype": "string"}, {"name": "license", "dtype": "string"}, {"name": "message", "dtype": "string"}, {"name": "label", "dtype": "int8"}, {"name": "comment", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 272359, "num_examples": 858}]}], "configs": [{"config_name": "commitchronicle-py-long", "data_files": [{"split": "test", "path": "commitchronicle-py-long/test-*"}]}, {"config_name": "commitchronicle-py-long-labels", "data_files": [{"split": "test", "path": "commitchronicle-py-long-labels/test-*"}]}]}
2024-02-06T11:23:30+00:00
[]
[]
TAGS #region-us
️ Long Code Arena (Commit Message Generation) ============================================= This is the benchmark for the Commit Message Generation task, part of the ️ Long Code Arena benchmark. The current version is a manually curated subset of the Python test set from the CommitChronicle dataset, tailored for larger commits. How-to ------ 1. List all the available configs via 'datasets.get\_dataset\_config\_names' and choose an appropriate one. Current configs: 'commitchronicle-py-long', 'commitchronicle-py-long-labels' 2. Load the data via 'load\_dataset': Note that all the data we have is considered to be in the test split. Note. Working with git repositories under the 'repos' directory is not supported via Datasets. Download and extract the contents of each repository. We provide a full list of files in 'URL'. Dataset Structure ----------------- This dataset contains three kinds of data: * *full data* about each commit (including modifications) * metadata with quality *labels* * compressed *git repositories* ### Full data This section concerns the configuration with *full data* about each commit (no '-labels' suffix). Each example has the following fields: Each file modification has the following fields: Data point example: ### Labels This section concerns the configuration with metadata and *labels* (with '-labels' suffix). Each example has the following fields: Labels are on a 1-5 scale, where: * 1 – strong no * 2 – weak no * 3 – unsure * 4 – weak yes * 5 – strong yes Data point example: ### Git Repositories This section concerns the 'repos' directory, which stores compressed Git repositories for all the commits in this benchmark. After you download and extract it, you can work with each repository either via Git or via Python libraries like GitPython or PyDriller.
[ "### Full data\n\n\nThis section concerns configuration with *full data* about each commit (no '-labels' suffix).\n\n\nEach example has the following fields:\n\n\n\nEach file modification has the following fields:\n\n\n\nData point example:", "### Labels\n\n\nThis section concerns configuration with metadata and *labels* (with '-labels' suffix).\n\n\nEach example has the following fields:\n\n\n\nLabels are in 1-5 scale, where:\n\n\n* 1 – strong no\n* 2 – weak no\n* 3 – unsure\n* 4 – weak yes\n* 5 – strong yes\n\n\nData point example:", "### Git Repositories\n\n\nThis section concerns 'repos' directory,\nwhich stores compressed Git repositories for all the commits in this benchmark. After you download and extract it, you\ncan work with each repository either via Git or via Python libraries\nlike GitPython\nor PyDriller." ]
[ "TAGS\n#region-us \n", "### Full data\n\n\nThis section concerns configuration with *full data* about each commit (no '-labels' suffix).\n\n\nEach example has the following fields:\n\n\n\nEach file modification has the following fields:\n\n\n\nData point example:", "### Labels\n\n\nThis section concerns configuration with metadata and *labels* (with '-labels' suffix).\n\n\nEach example has the following fields:\n\n\n\nLabels are in 1-5 scale, where:\n\n\n* 1 – strong no\n* 2 – weak no\n* 3 – unsure\n* 4 – weak yes\n* 5 – strong yes\n\n\nData point example:", "### Git Repositories\n\n\nThis section concerns 'repos' directory,\nwhich stores compressed Git repositories for all the commits in this benchmark. After you download and extract it, you\ncan work with each repository either via Git or via Python libraries\nlike GitPython\nor PyDriller." ]
[ 6, 49, 74, 74 ]
[ "passage: TAGS\n#region-us \n### Full data\n\n\nThis section concerns configuration with *full data* about each commit (no '-labels' suffix).\n\n\nEach example has the following fields:\n\n\n\nEach file modification has the following fields:\n\n\n\nData point example:### Labels\n\n\nThis section concerns configuration with metadata and *labels* (with '-labels' suffix).\n\n\nEach example has the following fields:\n\n\n\nLabels are in 1-5 scale, where:\n\n\n* 1 – strong no\n* 2 – weak no\n* 3 – unsure\n* 4 – weak yes\n* 5 – strong yes\n\n\nData point example:### Git Repositories\n\n\nThis section concerns 'repos' directory,\nwhich stores compressed Git repositories for all the commits in this benchmark. After you download and extract it, you\ncan work with each repository either via Git or via Python libraries\nlike GitPython\nor PyDriller." ]
4efbe6b90a17d1e5be5f6608a2c21aaf2c8315e8
# Dataset Card for Multilingual European Datasets for Sensitive Entity Detection in the Legal Domain ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Split Distribution](#split-distribution) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** [Spanish](https://elrc-share.eu/repository/browse/mapa-anonymization-package-spanish/b550e1a88a8311ec9c1a00155d026706687917f92f64482587c6382175dffd76/), [Most](https://elrc-share.eu/repository/search/?q=mfsp:3222a6048a8811ec9c1a00155d0267067eb521077db54d6684fb14ce8491a391), [German, Portuguese, Slovak, Slovenian, Swedish](https://elrc-share.eu/repository/search/?q=mfsp:833df1248a8811ec9c1a00155d0267067685dcdb77064822b51cc16ab7b81a36) - **Paper:** de Gibert Bonet, O., García Pablos, A., Cuadros, M., & Melero, M. (2022). Spanish Datasets for Sensitive Entity Detection in the Legal Domain. Proceedings of the Language Resources and Evaluation Conference, June, 3751–3760. http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.400.pdf - **Leaderboard:** - **Point of Contact:** [Joel Niklaus](mailto:[email protected]) ### Dataset Summary This dataset is a completed version of the [MAPA](https://huggingface.co/datasets/joelniklaus/mapa) EUR-LEX dataset, originally converted to Huggingface format by [joelniklaus](https://huggingface.co/joelniklaus). See the [dataset card](https://huggingface.co/datasets/joelniklaus/mapa) for more information about MAPA. 3 of the (Spanish) EUR-LEX WebAnno TSV files in the source MAPA repository are malformed, so they were omitted from the [original conversion](https://huggingface.co/datasets/joelniklaus/mapa), causing under-representation of the Spanish language. These files were repaired manually, and the whole dataset reparsed using joelniklaus' [conversion script](https://huggingface.co/datasets/joelniklaus/mapa/blob/main/convert_to_hf_dataset.py). The script was modified slightly to include the original sentence of each example in the "sentence" column. ### Split Distribution For all languages other than Spanish, [joelniklaus](https://huggingface.co/datasets/joelniklaus)' dataset splits have been preserved for consistency. The split of Spanish samples has changed due to the availability of more data. Optionally, to create balanced splits with improved distribution of labelled entities, use the following: ``` from datasets import load_dataset, concatenate_datasets mapa = load_dataset("dglover1/mapa-eur-lex") mapa = concatenate_datasets((mapa["train"], mapa["validation"], mapa["test"])) mapa = mapa.train_test_split(test_size=0.2, seed=1) mapa = mapa.flatten_indices() ``` Note that this only creates train/test splits. For train/test/validation, you can further split either train or test and rename accordingly. 
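As an illustrative sketch of that last step, continuing from the snippet above (the 0.125 fraction is an assumption, chosen so the final proportions come out at roughly 70/10/20):

```
# Hedged example: carve a validation set out of the new train split.
# 0.125 of the 80% train portion is about 10% of the full data.
split = mapa["train"].train_test_split(test_size=0.125, seed=1)
mapa_splits = {
    "train": split["train"],
    "validation": split["test"],
    "test": mapa["test"],
}
```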
### Licensing Information [Attribution 4.0 International (CC BY 4.0) ](https://creativecommons.org/licenses/by/4.0/) ### Citation Information ``` @article{DeGibertBonet2022, author = {{de Gibert Bonet}, Ona and {Garc{\'{i}}a Pablos}, Aitor and Cuadros, Montse and Melero, Maite}, journal = {Proceedings of the Language Resources and Evaluation Conference}, number = {June}, pages = {3751--3760}, title = {{Spanish Datasets for Sensitive Entity Detection in the Legal Domain}}, url = {https://aclanthology.org/2022.lrec-1.400}, year = {2022} } ``` ### Contributions Thanks to [@JoelNiklaus](https://github.com/joelniklaus) and [@kapllan](https://github.com/kapllan) for adding this dataset.
dglover1/mapa-eur-lex
[ "task_categories:token-classification", "task_ids:named-entity-recognition", "annotations_creators:other", "language_creators:found", "multilinguality:multilingual", "size_categories:1K<n<10K", "source_datasets:joelniklaus/mapa", "language:multilingual", "language:bg", "language:cs", "language:da", "language:de", "language:el", "language:en", "language:es", "language:et", "language:fi", "language:fr", "language:ga", "language:hu", "language:it", "language:lt", "language:lv", "language:mt", "language:nl", "language:pt", "language:ro", "language:sk", "language:sv", "license:cc-by-4.0", "named-entity-recognition-and-classification", "region:us" ]
2023-11-07T10:11:34+00:00
{"annotations_creators": ["other"], "language_creators": ["found"], "language": ["multilingual", "bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hu", "it", "lt", "lv", "mt", "nl", "pt", "ro", "sk", "sv"], "license": ["cc-by-4.0"], "multilinguality": ["multilingual"], "size_categories": ["1K<n<10K"], "source_datasets": ["joelniklaus/mapa"], "task_categories": ["token-classification"], "task_ids": ["named-entity-recognition"], "pretty_name": "Spanish Datasets for Sensitive Entity Detection in the Legal Domain", "tags": ["named-entity-recognition-and-classification"]}
2024-01-29T08:44:47+00:00
[]
[ "multilingual", "bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hu", "it", "lt", "lv", "mt", "nl", "pt", "ro", "sk", "sv" ]
TAGS #task_categories-token-classification #task_ids-named-entity-recognition #annotations_creators-other #language_creators-found #multilinguality-multilingual #size_categories-1K<n<10K #source_datasets-joelniklaus/mapa #language-multilingual #language-Bulgarian #language-Czech #language-Danish #language-German #language-Modern Greek (1453-) #language-English #language-Spanish #language-Estonian #language-Finnish #language-French #language-Irish #language-Hungarian #language-Italian #language-Lithuanian #language-Latvian #language-Maltese #language-Dutch #language-Portuguese #language-Romanian #language-Slovak #language-Swedish #license-cc-by-4.0 #named-entity-recognition-and-classification #region-us
# Dataset Card for Multilingual European Datasets for Sensitive Entity Detection in the Legal Domain ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Split Distribution - Additional Information - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: - Repository: Spanish, Most, German, Portuguese, Slovak, Slovenian, Swedish - Paper: de Gibert Bonet, O., García Pablos, A., Cuadros, M., & Melero, M. (2022). Spanish Datasets for Sensitive Entity Detection in the Legal Domain. Proceedings of the Language Resources and Evaluation Conference, June, 3751–3760. URL - Leaderboard: - Point of Contact: Joel Niklaus ### Dataset Summary This dataset is a completed version of the MAPA EUR-LEX dataset, originally converted to Huggingface format by joelniklaus. See the dataset card for more information about MAPA. 3 of the (Spanish) EUR-LEX WebAnno TSV files in the source MAPA repository are malformed, so they were omitted from the original conversion, causing under-representation of the Spanish language. These files were repaired manually, and the whole dataset reparsed using joelniklaus' conversion script. The script was modified slightly to include the original sentence of each example in the "sentence" column. ### Split Distribution For all languages other than Spanish, joelniklaus' dataset splits have been preserved for consistency. The split of Spanish samples has changed due to the availability of more data. Optionally, to create balanced splits with improved distribution of labelled entities, use the following: Note that this only creates train/test splits. For train/test/validation, you can further split either train or test and rename accordingly. ### Licensing Information Attribution 4.0 International (CC BY 4.0) ### Contributions Thanks to @JoelNiklaus and @kapllan for adding this dataset.
[ "# Dataset Card for Multilingual European Datasets for Sensitive Entity Detection in the Legal Domain", "## Table of Contents\n\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n\n- Split Distribution\n \n- Additional Information\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- \n Repository: Spanish, Most, German, Portuguese, Slovak, Slovenian, Swedish\n- Paper: de Gibert Bonet, O., García Pablos, A., Cuadros, M., & Melero, M. (2022). Spanish Datasets for Sensitive\n Entity Detection in the Legal Domain. Proceedings of the Language Resources and Evaluation Conference, June,\n 3751–3760. URL\n- Leaderboard:\n- Point of Contact: Joel Niklaus", "### Dataset Summary\n\nThis dataset is a completed version of the MAPA EUR-LEX dataset, originally converted to Huggingface format by joelniklaus. See the dataset card for more information about MAPA.\n\n3 of the (Spanish) EUR-LEX WebAnno TSV files in the source MAPA repository are malformed, so they were omitted from the original conversion, causing under-representation of the Spanish language.\nThese files were repaired manually, and the whole dataset reparsed using joelniklaus' conversion script. The script was modified slightly to include the original sentence of each example in the \"sentence\" column.", "### Split Distribution\n\nFor all languages other than Spanish, joelniklaus' dataset splits have been preserved for consistency. The split of Spanish samples has changed due to the availability of more data.\n\nOptionally, to create balanced splits with improved distribution of labelled entities, use the following:\n\n\nNote that this only creates train/test splits. For train/test/validation, you can further split either train or test and rename accordingly.", "### Licensing Information\n\nAttribution 4.0 International (CC BY 4.0)", "### Contributions\n\nThanks to @JoelNiklaus and @kapllan for adding this\ndataset." ]
[ "TAGS\n#task_categories-token-classification #task_ids-named-entity-recognition #annotations_creators-other #language_creators-found #multilinguality-multilingual #size_categories-1K<n<10K #source_datasets-joelniklaus/mapa #language-multilingual #language-Bulgarian #language-Czech #language-Danish #language-German #language-Modern Greek (1453-) #language-English #language-Spanish #language-Estonian #language-Finnish #language-French #language-Irish #language-Hungarian #language-Italian #language-Lithuanian #language-Latvian #language-Maltese #language-Dutch #language-Portuguese #language-Romanian #language-Slovak #language-Swedish #license-cc-by-4.0 #named-entity-recognition-and-classification #region-us \n", "# Dataset Card for Multilingual European Datasets for Sensitive Entity Detection in the Legal Domain", "## Table of Contents\n\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n\n- Split Distribution\n \n- Additional Information\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- \n Repository: Spanish, Most, German, Portuguese, Slovak, Slovenian, Swedish\n- Paper: de Gibert Bonet, O., García Pablos, A., Cuadros, M., & Melero, M. (2022). Spanish Datasets for Sensitive\n Entity Detection in the Legal Domain. Proceedings of the Language Resources and Evaluation Conference, June,\n 3751–3760. URL\n- Leaderboard:\n- Point of Contact: Joel Niklaus", "### Dataset Summary\n\nThis dataset is a completed version of the MAPA EUR-LEX dataset, originally converted to Huggingface format by joelniklaus. See the dataset card for more information about MAPA.\n\n3 of the (Spanish) EUR-LEX WebAnno TSV files in the source MAPA repository are malformed, so they were omitted from the original conversion, causing under-representation of the Spanish language.\nThese files were repaired manually, and the whole dataset reparsed using joelniklaus' conversion script. The script was modified slightly to include the original sentence of each example in the \"sentence\" column.", "### Split Distribution\n\nFor all languages other than Spanish, joelniklaus' dataset splits have been preserved for consistency. The split of Spanish samples has changed due to the availability of more data.\n\nOptionally, to create balanced splits with improved distribution of labelled entities, use the following:\n\n\nNote that this only creates train/test splits. For train/test/validation, you can further split either train or test and rename accordingly.", "### Licensing Information\n\nAttribution 4.0 International (CC BY 4.0)", "### Contributions\n\nThanks to @JoelNiklaus and @kapllan for adding this\ndataset." ]
[ 234, 24, 41, 114, 146, 106, 14, 22 ]
[ "passage: TAGS\n#task_categories-token-classification #task_ids-named-entity-recognition #annotations_creators-other #language_creators-found #multilinguality-multilingual #size_categories-1K<n<10K #source_datasets-joelniklaus/mapa #language-multilingual #language-Bulgarian #language-Czech #language-Danish #language-German #language-Modern Greek (1453-) #language-English #language-Spanish #language-Estonian #language-Finnish #language-French #language-Irish #language-Hungarian #language-Italian #language-Lithuanian #language-Latvian #language-Maltese #language-Dutch #language-Portuguese #language-Romanian #language-Slovak #language-Swedish #license-cc-by-4.0 #named-entity-recognition-and-classification #region-us \n# Dataset Card for Multilingual European Datasets for Sensitive Entity Detection in the Legal Domain## Table of Contents\n\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n\n- Split Distribution\n \n- Additional Information\n - Licensing Information\n - Citation Information\n - Contributions## Dataset Description\n\n- Homepage:\n- \n Repository: Spanish, Most, German, Portuguese, Slovak, Slovenian, Swedish\n- Paper: de Gibert Bonet, O., García Pablos, A., Cuadros, M., & Melero, M. (2022). Spanish Datasets for Sensitive\n Entity Detection in the Legal Domain. Proceedings of the Language Resources and Evaluation Conference, June,\n 3751–3760. URL\n- Leaderboard:\n- Point of Contact: Joel Niklaus" ]
0952706ff416ce89a906493daa5b4044455825f4
TODO: Add YAML tags here. Copy-paste the tags obtained with the online tagging app: https://huggingface.co/spaces/huggingface/datasets-tagging --- # Dataset Card for [Dataset Name] ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset.
SerahAKojenu/Masakhane-news
[ "task_categories:text-classification", "size_categories:n<1K", "language:en", "language:yo", "biology", "finance", "region:us" ]
2023-11-07T10:15:57+00:00
{"language": ["en", "yo"], "size_categories": ["n<1K"], "task_categories": ["text-classification"], "tags": ["biology", "finance"]}
2023-11-07T10:41:51+00:00
[]
[ "en", "yo" ]
TAGS #task_categories-text-classification #size_categories-n<1K #language-English #language-Yoruba #biology #finance #region-us
TODO: Add YAML tags here. Copy-paste the tags obtained with the online tagging app: URL --- # Dataset Card for [Dataset Name] ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: - Repository: - Paper: - Leaderboard: - Point of Contact: ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions Thanks to @github-username for adding this dataset.
[ "# Dataset Card for [Dataset Name]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @github-username for adding this dataset." ]
[ "TAGS\n#task_categories-text-classification #size_categories-n<1K #language-English #language-Yoruba #biology #finance #region-us \n", "# Dataset Card for [Dataset Name]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @github-username for adding this dataset." ]
[ 42, 10, 125, 24, 6, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 19 ]
[ "passage: TAGS\n#task_categories-text-classification #size_categories-n<1K #language-English #language-Yoruba #biology #finance #region-us \n# Dataset Card for [Dataset Name]## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:### Dataset Summary### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions\n\nThanks to @github-username for adding this dataset." ]
7c0afaa011acbb1f7a39726f01e69bbc7fc83483
# Dataset Card for "donutpreparedFinetuneDataGenreted" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
aminlouhichi/donutpreparedFinetuneDataGenreted
[ "region:us" ]
2023-11-07T10:34:12+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "ground_truth", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 27904115.0, "num_examples": 128}, {"name": "validation", "num_bytes": 13089836.0, "num_examples": 60}, {"name": "test", "num_bytes": 13111083.0, "num_examples": 59}], "download_size": 50588060, "dataset_size": 54105034.0}}
2023-11-07T10:34:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "donutpreparedFinetuneDataGenreted" More Information needed
[ "# Dataset Card for \"donutpreparedFinetuneDataGenreted\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"donutpreparedFinetuneDataGenreted\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"donutpreparedFinetuneDataGenreted\"\n\nMore Information needed" ]
cf2f9ef89168d31cb09e42993d35b068688fe0df
Digital Typhoon Dataset: KITAMOTO, A., HWANG, J., VUILLOD, B., GAUTIER, L., TIAN, Y., & CLANUWAT, T. (2023, December). Digital Typhoon: Long-term Satellite Image Dataset for the Spatio-Temporal Modeling of Tropical Cyclones. NeurIPS 2023 Datasets and Benchmarks (Spotlight). This dataset was created by the [Digital Typhoon](http://agora.ex.nii.ac.jp/digital-typhoon/) project.
torchgeo/digital_typhoon
[ "size_categories:100K<n<1M", "license:cc-by-4.0", "region:us" ]
2023-11-07T10:37:52+00:00
{"license": "cc-by-4.0", "size_categories": ["100K<n<1M"]}
2023-12-18T11:04:35+00:00
[]
[]
TAGS #size_categories-100K<n<1M #license-cc-by-4.0 #region-us
Digital Typhoon Dataset: KITAMOTO, A., HWANG, J., VUILLOD, B., GAUTIER, L., TIAN, Y., & CLANUWAT, T. (2023, December). Digital Typhoon: Long-term Satellite Image Dataset for the Spatio-Temporal Modeling of Tropical Cyclones. NeurIPS 2023 Datasets and Benchmarks (Spotlight). This dataset was created by the Digital Typhoon project.
[]
[ "TAGS\n#size_categories-100K<n<1M #license-cc-by-4.0 #region-us \n" ]
[ 27 ]
[ "passage: TAGS\n#size_categories-100K<n<1M #license-cc-by-4.0 #region-us \n" ]
87d2dd53e17aa5e8725a671fc547c454f36d1142
# Dataset Card for "parquet_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ShuoShuoShuo/parquet_test
[ "region:us" ]
2023-11-07T10:57:30+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2612928248.18, "num_examples": 22382}], "download_size": 2598303734, "dataset_size": 2612928248.18}}
2023-11-29T09:20:48+00:00
[]
[]
TAGS #region-us
# Dataset Card for "parquet_test" More Information needed
[ "# Dataset Card for \"parquet_test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"parquet_test\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"parquet_test\"\n\nMore Information needed" ]
8582000f3769db8e19720efabd0692ad2edfde41
# Description

The Pavia Centre and University are two scenes acquired by the [ROSIS](http://www.opairs.aero/rosis_en.html) sensor during a flight campaign over Pavia, northern Italy. The number of spectral bands is 102 for Pavia Centre and 103 for Pavia University. Pavia Centre is a 1096 $\times$ 1096 pixel image, and Pavia University is 610 $\times$ 610 pixels, but some of the samples in both images contain no information and have to be discarded before the analysis. The geometric resolution is 1.3 meters. The groundtruth of each image differentiates 9 classes. The discarded samples appear in the figures as broad black strips.

# Characteristics

**Groundtruth classes for the Pavia Centre scene and their respective numbers of samples**

| # | Class                | Samples |
|---|----------------------|---------|
| 1 | Water                | 824     |
| 2 | Trees                | 820     |
| 3 | Asphalt              | 816     |
| 4 | Self-Blocking Bricks | 808     |
| 5 | Bitumen              | 808     |
| 6 | Tiles                | 1260    |
| 7 | Shadows              | 476     |
| 8 | Meadows              | 824     |
| 9 | Bare Soil            | 820     |

**Groundtruth classes for the Pavia University scene and their respective numbers of samples**

| # | Class                | Samples |
|---|----------------------|---------|
| 1 | Asphalt              | 6631    |
| 2 | Meadows              | 18649   |
| 3 | Gravel               | 2099    |
| 4 | Trees                | 3064    |
| 5 | Painted metal sheets | 1345    |
| 6 | Bare Soil            | 5029    |
| 7 | Bitumen              | 1330    |
| 8 | Self-Blocking Bricks | 3682    |
| 9 | Shadows              | 947     |

# Quick look

<figure>
    <img src="assets/Pavia_60.png" alt="Pavia" width="300" />
    <figcaption>Sample band of Pavia Centre dataset.</figcaption>
</figure>

<figure>
    <img src="assets/Pavia_gt.png" alt="Pavia gt" width="300" />
    <figcaption>Groundtruth of Pavia Centre dataset.</figcaption>
</figure>

<figure>
    <img src="assets/PaviaU_60.png" alt="PaviaU" width="300" />
    <figcaption>Sample band of Pavia University dataset.</figcaption>
</figure>

<figure>
    <img src="assets/PaviaU_gt.png" alt="PaviaU gt" width="300" />
    <figcaption>Groundtruth of Pavia University dataset.</figcaption>
</figure>

# Credits

Pavia scenes were provided by [Prof. Paolo Gamba](http://tlclab.unipv.it/sito_tlc/people.do?id=pgamba) from the [Telecommunications and Remote Sensing Laboratory](http://tlclab.unipv.it/), [Pavia University](http://www.unipv.eu/) (Italy).

This dataset was originally collected by Manuel Graña, Miguel-Angel Veganzones, Borja Ayerdi. The original link for the dataset is available below:

https://www.ehu.eus/ccwintco/index.php/Hyperspectral_Remote_Sensing_Scenes
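A minimal loading sketch, assuming the scenes are downloaded as MATLAB `.mat` files from the EHU page linked above; the in-file variable names (`paviaU`, `paviaU_gt`) follow the usual convention for this distribution and can be verified with `scipy.io.whosmat` if in doubt:

```python
# Load the Pavia University cube and its groundtruth, then drop unlabeled pixels.
import numpy as np
from scipy.io import loadmat

data = loadmat("PaviaU.mat")["paviaU"]      # (rows, cols, bands) reflectance cube
gt = loadmat("PaviaU_gt.mat")["paviaU_gt"]  # (rows, cols) labels, 0 = unlabeled

pixels = data.reshape(-1, data.shape[-1])   # flatten to (n_pixels, n_bands)
labels = gt.ravel()
mask = labels > 0                           # discard the black-strip / unlabeled samples
X, y = pixels[mask], labels[mask]
print(X.shape, np.unique(y))
```

The same pattern applies to the Pavia Centre files, with the corresponding variable names.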
danaroth/pavia
[ "license:unknown", "region:us" ]
2023-11-07T10:58:25+00:00
{"license": "unknown"}
2023-11-09T17:57:39+00:00
[]
[]
TAGS #license-unknown #region-us
Description
===========

The Pavia Centre and University are two scenes acquired by the ROSIS sensor during a flight campaign over Pavia, northern Italy. The number of spectral bands is 102 for Pavia Centre and 103 for Pavia University. Pavia Centre is a 1096 $\times$ 1096 pixel image, and Pavia University is 610 $\times$ 610 pixels, but some of the samples in both images contain no information and have to be discarded before the analysis. The geometric resolution is 1.3 meters. The groundtruth of each image differentiates 9 classes. The discarded samples appear in the figures as broad black strips.

Characteristics
===============

Groundtruth classes for the Pavia Centre scene and their respective numbers of samples

#: 1, Class: Water, Samples: 824
#: 2, Class: Trees, Samples: 820
#: 3, Class: Asphalt, Samples: 816
#: 4, Class: Self-Blocking Bricks, Samples: 808
#: 5, Class: Bitumen, Samples: 808
#: 6, Class: Tiles, Samples: 1260
#: 7, Class: Shadows, Samples: 476
#: 8, Class: Meadows, Samples: 824
#: 9, Class: Bare Soil, Samples: 820

Groundtruth classes for the Pavia University scene and their respective numbers of samples

#: 1, Class: Asphalt, Samples: 6631
#: 2, Class: Meadows, Samples: 18649
#: 3, Class: Gravel, Samples: 2099
#: 4, Class: Trees, Samples: 3064
#: 5, Class: Painted metal sheets, Samples: 1345
#: 6, Class: Bare Soil, Samples: 5029
#: 7, Class: Bitumen, Samples: 1330
#: 8, Class: Self-Blocking Bricks, Samples: 3682
#: 9, Class: Shadows, Samples: 947

Quick look
==========

![Pavia](assets/Pavia_60.png)

Sample band of Pavia Centre dataset.

![Pavia gt](assets/Pavia_gt.png)

Groundtruth of Pavia Centre dataset.

![PaviaU](assets/PaviaU_60.png)

Sample band of Pavia University dataset.

![PaviaU gt](assets/PaviaU_gt.png)

Groundtruth of Pavia University dataset.

Credits
=======

Pavia scenes were provided by Prof. Paolo Gamba from the Telecommunications and Remote Sensing Laboratory, Pavia University (Italy). This dataset was originally collected by Manuel Graña, Miguel-Angel Veganzones, Borja Ayerdi. The original link for the dataset is available below:

URL
[]
[ "TAGS\n#license-unknown #region-us \n" ]
[ 13 ]
[ "passage: TAGS\n#license-unknown #region-us \n" ]