sha
stringlengths
40
40
text
stringlengths
1
13.4M
id
stringlengths
2
117
tags
listlengths
1
7.91k
created_at
stringlengths
25
25
metadata
stringlengths
2
875k
last_modified
stringlengths
25
25
arxiv
listlengths
0
25
languages
listlengths
0
7.91k
tags_str
stringlengths
17
159k
text_str
stringlengths
1
447k
text_lists
listlengths
0
352
processed_texts
listlengths
1
353
tokens_length
listlengths
1
353
input_texts
listlengths
1
40
344feb062a8ed90a1f3e8513b9facf27e8752fe8
# Dataset Card for "soict_sentence_synthesis" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
thanhduycao/soict_sentence_synthesis
[ "region:us" ]
2023-10-27T13:32:56+00:00
{"dataset_info": {"features": [{"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 55042, "num_examples": 800}], "download_size": 23910, "dataset_size": 55042}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-27T13:32:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "soict_sentence_synthesis" More Information needed
[ "# Dataset Card for \"soict_sentence_synthesis\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"soict_sentence_synthesis\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"soict_sentence_synthesis\"\n\nMore Information needed" ]
5f996355104693841f571205b88b3cbbf951009b
# Dataset Card for "PhysiotherapyExercises_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Imran1/PhysiotherapyExercises_test
[ "region:us" ]
2023-10-27T13:42:43+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "Elbove Extension", "1": "KNEE Flexion", "2": "NECK Exercise", "3": "PlanterFlexion of Foot", "4": "Trunk Extension", "5": "Trunk Flexion", "6": "Wrist Extension", "7": "Wrist Flexion"}}}}], "splits": [{"name": "train", "num_bytes": 16383757.0, "num_examples": 40}], "download_size": 13986736, "dataset_size": 16383757.0}}
2023-10-27T13:42:45+00:00
[]
[]
TAGS #region-us
# Dataset Card for "PhysiotherapyExercises_test" More Information needed
[ "# Dataset Card for \"PhysiotherapyExercises_test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"PhysiotherapyExercises_test\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"PhysiotherapyExercises_test\"\n\nMore Information needed" ]
197da9c578012b2eafec7647761da19f76e24a8c
# Dataset Card for "accepted_pairs_st" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
makram93/accepted_pairs_st
[ "region:us" ]
2023-10-27T13:53:18+00:00
{"dataset_info": {"features": [{"name": "url", "dtype": "string"}, {"name": "doc_id", "dtype": "string"}, {"name": "original_title", "sequence": "string"}, {"name": "right", "dtype": "string"}, {"name": "left", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 88447.0623234648, "num_examples": 100}], "download_size": 87877, "dataset_size": 88447.0623234648}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-27T13:55:15+00:00
[]
[]
TAGS #region-us
# Dataset Card for "accepted_pairs_st" More Information needed
[ "# Dataset Card for \"accepted_pairs_st\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"accepted_pairs_st\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"accepted_pairs_st\"\n\nMore Information needed" ]
0f301290eb33a90b2757feba62094ebdd76efe78
# Dataset Card for "rejected_pairs_st" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
makram93/rejected_pairs_st
[ "region:us" ]
2023-10-27T13:53:21+00:00
{"dataset_info": {"features": [{"name": "url", "dtype": "string"}, {"name": "doc_id", "dtype": "string"}, {"name": "original_title", "sequence": "string"}, {"name": "right", "dtype": "string"}, {"name": "left", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 88447.0623234648, "num_examples": 100}], "download_size": 82694, "dataset_size": 88447.0623234648}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-27T13:55:18+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rejected_pairs_st" More Information needed
[ "# Dataset Card for \"rejected_pairs_st\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rejected_pairs_st\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rejected_pairs_st\"\n\nMore Information needed" ]
81e700b8457f35571514ff26514a2e4db51c052e
# Dataset Card for "accepted_pairs_small" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
makram93/accepted_pairs_small
[ "region:us" ]
2023-10-27T13:57:39+00:00
{"dataset_info": {"features": [{"name": "url", "dtype": "string"}, {"name": "doc_id", "dtype": "string"}, {"name": "original_title", "sequence": "string"}, {"name": "right", "dtype": "string"}, {"name": "left", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 88447.0623234648, "num_examples": 100}], "download_size": 83182, "dataset_size": 88447.0623234648}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-27T13:59:08+00:00
[]
[]
TAGS #region-us
# Dataset Card for "accepted_pairs_small" More Information needed
[ "# Dataset Card for \"accepted_pairs_small\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"accepted_pairs_small\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"accepted_pairs_small\"\n\nMore Information needed" ]
9b0f8b7bcaac8c5075fab4ef02b6a6c4f466f3d4
# Dataset Card for "rejected_pairs_small" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
makram93/rejected_pairs_small
[ "region:us" ]
2023-10-27T13:57:42+00:00
{"dataset_info": {"features": [{"name": "url", "dtype": "string"}, {"name": "doc_id", "dtype": "string"}, {"name": "original_title", "sequence": "string"}, {"name": "right", "dtype": "string"}, {"name": "left", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 88447.0623234648, "num_examples": 100}], "download_size": 87326, "dataset_size": 88447.0623234648}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-27T13:59:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rejected_pairs_small" More Information needed
[ "# Dataset Card for \"rejected_pairs_small\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rejected_pairs_small\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"rejected_pairs_small\"\n\nMore Information needed" ]
e0c2cf88fcec28930f7ccc367b67b3dca1414172
# Dataset Card for "github-issues" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dariodematties/github-issues
[ "region:us" ]
2023-10-27T14:06:17+00:00
{"dataset_info": {"features": [{"name": "url", "dtype": "string"}, {"name": "repository_url", "dtype": "string"}, {"name": "labels_url", "dtype": "string"}, {"name": "comments_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "number", "dtype": "int64"}, {"name": "title", "dtype": "string"}, {"name": "user", "struct": [{"name": "login", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "avatar_url", "dtype": "string"}, {"name": "gravatar_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "followers_url", "dtype": "string"}, {"name": "following_url", "dtype": "string"}, {"name": "gists_url", "dtype": "string"}, {"name": "starred_url", "dtype": "string"}, {"name": "subscriptions_url", "dtype": "string"}, {"name": "organizations_url", "dtype": "string"}, {"name": "repos_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "received_events_url", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "site_admin", "dtype": "bool"}]}, {"name": "labels", "list": [{"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "color", "dtype": "string"}, {"name": "default", "dtype": "bool"}, {"name": "description", "dtype": "string"}]}, {"name": "state", "dtype": "string"}, {"name": "locked", "dtype": "bool"}, {"name": "assignee", "struct": [{"name": "login", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "avatar_url", "dtype": "string"}, {"name": "gravatar_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "followers_url", "dtype": "string"}, {"name": "following_url", "dtype": "string"}, {"name": "gists_url", "dtype": "string"}, {"name": "starred_url", "dtype": "string"}, {"name": "subscriptions_url", "dtype": "string"}, {"name": "organizations_url", "dtype": "string"}, {"name": "repos_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "received_events_url", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "site_admin", "dtype": "bool"}]}, {"name": "assignees", "list": [{"name": "login", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "avatar_url", "dtype": "string"}, {"name": "gravatar_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "followers_url", "dtype": "string"}, {"name": "following_url", "dtype": "string"}, {"name": "gists_url", "dtype": "string"}, {"name": "starred_url", "dtype": "string"}, {"name": "subscriptions_url", "dtype": "string"}, {"name": "organizations_url", "dtype": "string"}, {"name": "repos_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "received_events_url", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "site_admin", "dtype": "bool"}]}, {"name": "milestone", "struct": [{"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "labels_url", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "number", "dtype": "int64"}, {"name": "title", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "creator", "struct": [{"name": "login", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "avatar_url", "dtype": "string"}, {"name": "gravatar_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "followers_url", "dtype": "string"}, {"name": "following_url", "dtype": "string"}, {"name": "gists_url", "dtype": "string"}, {"name": "starred_url", "dtype": "string"}, {"name": "subscriptions_url", "dtype": "string"}, {"name": "organizations_url", "dtype": "string"}, {"name": "repos_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "received_events_url", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "site_admin", "dtype": "bool"}]}, {"name": "open_issues", "dtype": "int64"}, {"name": "closed_issues", "dtype": "int64"}, {"name": "state", "dtype": "string"}, {"name": "created_at", "dtype": "timestamp[s]"}, {"name": "updated_at", "dtype": "timestamp[s]"}, {"name": "due_on", "dtype": "null"}, {"name": "closed_at", "dtype": "null"}]}, {"name": "comments", "sequence": "string"}, {"name": "created_at", "dtype": "timestamp[s]"}, {"name": "updated_at", "dtype": "timestamp[s]"}, {"name": "closed_at", "dtype": "timestamp[s]"}, {"name": "author_association", "dtype": "string"}, {"name": "active_lock_reason", "dtype": "null"}, {"name": "body", "dtype": "string"}, {"name": "reactions", "struct": [{"name": "url", "dtype": "string"}, {"name": "total_count", "dtype": "int64"}, {"name": "+1", "dtype": "int64"}, {"name": "-1", "dtype": "int64"}, {"name": "laugh", "dtype": "int64"}, {"name": "hooray", "dtype": "int64"}, {"name": "confused", "dtype": "int64"}, {"name": "heart", "dtype": "int64"}, {"name": "rocket", "dtype": "int64"}, {"name": "eyes", "dtype": "int64"}]}, {"name": "timeline_url", "dtype": "string"}, {"name": "performed_via_github_app", "dtype": "null"}, {"name": "state_reason", "dtype": "string"}, {"name": "draft", "dtype": "bool"}, {"name": "pull_request", "struct": [{"name": "url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "diff_url", "dtype": "string"}, {"name": "patch_url", "dtype": "string"}, {"name": "merged_at", "dtype": "timestamp[s]"}]}, {"name": "is_pull_request", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 21223797, "num_examples": 3000}], "download_size": 6015181, "dataset_size": 21223797}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-27T14:06:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "github-issues" More Information needed
[ "# Dataset Card for \"github-issues\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"github-issues\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"github-issues\"\n\nMore Information needed" ]
124d6bfedc6bcc337f1ded1fae4e783a9a97b929
MSOM_Data_Driven_Challenge_2020: To support the 2020 MSOM Data Driven Research Challenge, JD.com, China’s largest retailer, offers transaction-level data to MSOM members for conducting data-driven research. This dataset includes the transactional data associated with over 2.5 million customers (457,298 made purchases) and 31,868 SKUs over the month of March in 2018. Researchers are welcome to develop econometric models or data-driven models using this database to address some of the suggested questions or examine their own research questions. Keywords: E-Commerce, Transactional Data, MSOM Society, Data-Driven Research See https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3511861 for details. This repo serves as easier access to the dataset. The dataset can also be accessed from here: https://connect.informs.org/msom/events/datadriven2020. All CopyRights are reserved by JD.com.
a6687543/MSOM_Data_Driven_Challenge_2020
[ "license:odbl", "region:us" ]
2023-10-27T14:11:39+00:00
{"license": "odbl"}
2023-10-27T15:48:49+00:00
[]
[]
TAGS #license-odbl #region-us
MSOM_Data_Driven_Challenge_2020: To support the 2020 MSOM Data Driven Research Challenge, URL, China’s largest retailer, offers transaction-level data to MSOM members for conducting data-driven research. This dataset includes the transactional data associated with over 2.5 million customers (457,298 made purchases) and 31,868 SKUs over the month of March in 2018. Researchers are welcome to develop econometric models or data-driven models using this database to address some of the suggested questions or examine their own research questions. Keywords: E-Commerce, Transactional Data, MSOM Society, Data-Driven Research See URL for details. This repo serves as easier access to the dataset. The dataset can also be accessed from here: URL All CopyRights are reserved by URL.
[]
[ "TAGS\n#license-odbl #region-us \n" ]
[ 12 ]
[ "passage: TAGS\n#license-odbl #region-us \n" ]
04f53ec0b737ab17370502a5ac891f2ab5060cf6
# Dataset Card for "dataset_2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ayoub999/dataset_2
[ "region:us" ]
2023-10-27T14:32:50+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "image", "dtype": "image"}, {"name": "bboxes", "sequence": {"sequence": "int64"}}, {"name": "ner_tags", "sequence": {"class_label": {"names": {"0": "O", "1": "Ref", "2": "NumFa", "3": "Fourniss", "4": "DateFa", "5": "DateLim", "6": "TotalHT", "7": "TVA", "8": "TotalTTc", "9": "unitP", "10": "Qt", "11": "TVAP", "12": "descp"}}}}, {"name": "tokens", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 875976.0, "num_examples": 2}, {"name": "test", "num_bytes": 1021145.0, "num_examples": 1}], "download_size": 1276358, "dataset_size": 1897121.0}}
2023-10-27T20:28:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dataset_2" More Information needed
[ "# Dataset Card for \"dataset_2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dataset_2\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"dataset_2\"\n\nMore Information needed" ]
097f786fc6ece31a08ed209914ee1d94314367f3
Derives from https://www.kaggle.com/datasets/rtatman/questionanswer-dataset?resource=download we generated our own subset using `generate.py`.
rag-datasets/mini_wikipedia
[ "task_categories:question-answering", "task_categories:sentence-similarity", "size_categories:n<1K", "language:en", "license:cc-by-3.0", "rag", "wikipedia", "open-domain", "information-retrieval", "dpr", "region:us" ]
2023-10-27T14:50:06+00:00
{"language": ["en"], "license": "cc-by-3.0", "size_categories": ["n<1K"], "task_categories": ["question-answering", "sentence-similarity"], "tags": ["rag", "wikipedia", "open-domain", "information-retrieval", "dpr"], "configs": [{"config_name": "text-corpus", "data_files": [{"split": "passages", "path": "data/passages.parquet/*"}]}, {"config_name": "question-answer", "data_files": [{"split": "test", "path": "data/test.parquet/*"}]}]}
2023-10-28T07:55:16+00:00
[]
[ "en" ]
TAGS #task_categories-question-answering #task_categories-sentence-similarity #size_categories-n<1K #language-English #license-cc-by-3.0 #rag #wikipedia #open-domain #information-retrieval #dpr #region-us
Derives from URL we generated our own subset using 'URL'.
[]
[ "TAGS\n#task_categories-question-answering #task_categories-sentence-similarity #size_categories-n<1K #language-English #license-cc-by-3.0 #rag #wikipedia #open-domain #information-retrieval #dpr #region-us \n" ]
[ 72 ]
[ "passage: TAGS\n#task_categories-question-answering #task_categories-sentence-similarity #size_categories-n<1K #language-English #license-cc-by-3.0 #rag #wikipedia #open-domain #information-retrieval #dpr #region-us \n" ]
37b2407ab21069288a4df620a24ab2d8529656c7
# Dataset Card for "soict_train_synthesis_1000" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
thanhduycao/soict_train_synthesis_1000
[ "region:us" ]
2023-10-27T14:50:50+00:00
{"dataset_info": {"features": [{"name": "audio", "struct": [{"name": "array", "sequence": "float64"}, {"name": "path", "dtype": "null"}, {"name": "sampling_rate", "dtype": "int64"}]}, {"name": "sentence_norm", "dtype": "string"}, {"name": "wer", "dtype": "int64"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 534713143, "num_examples": 1000}], "download_size": 126390777, "dataset_size": 534713143}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-27T14:51:01+00:00
[]
[]
TAGS #region-us
# Dataset Card for "soict_train_synthesis_1000" More Information needed
[ "# Dataset Card for \"soict_train_synthesis_1000\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"soict_train_synthesis_1000\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"soict_train_synthesis_1000\"\n\nMore Information needed" ]
7a848fe86d2d4b4ce81ec3486ade34906498f5b5
Derives from http://participants-area.bioasq.org/Tasks/11b/trainingDataset/ we generated our own subset using `generate.py`.
rag-datasets/mini-bioasq
[ "task_categories:question-answering", "task_categories:sentence-similarity", "language:en", "license:cc-by-2.5", "rag", "dpr", "information-retrieval", "question-answering", "biomedical", "region:us" ]
2023-10-27T14:51:17+00:00
{"language": ["en"], "license": "cc-by-2.5", "task_categories": ["question-answering", "sentence-similarity"], "tags": ["rag", "dpr", "information-retrieval", "question-answering", "biomedical"], "configs": [{"config_name": "text-corpus", "data_files": [{"split": "passages", "path": "data/passages.parquet/*"}]}, {"config_name": "question-answer-passages", "data_files": [{"split": "test", "path": "data/test.parquet/*"}]}]}
2023-10-28T14:04:03+00:00
[]
[ "en" ]
TAGS #task_categories-question-answering #task_categories-sentence-similarity #language-English #license-cc-by-2.5 #rag #dpr #information-retrieval #question-answering #biomedical #region-us
Derives from URL we generated our own subset using 'URL'.
[]
[ "TAGS\n#task_categories-question-answering #task_categories-sentence-similarity #language-English #license-cc-by-2.5 #rag #dpr #information-retrieval #question-answering #biomedical #region-us \n" ]
[ 65 ]
[ "passage: TAGS\n#task_categories-question-answering #task_categories-sentence-similarity #language-English #license-cc-by-2.5 #rag #dpr #information-retrieval #question-answering #biomedical #region-us \n" ]
0b57fcaaf78c572555b6c576b106a76e1a07c10a
# Dataset Card for "ds_rplanpy_category" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ekuhn/ds_rplanpy_category
[ "region:us" ]
2023-10-27T15:06:08+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "num_rooms", "dtype": "int64"}, {"name": "img", "struct": [{"name": "bytes", "dtype": "binary"}, {"name": "path", "dtype": "null"}]}], "splits": [{"name": "train", "num_bytes": 30753308, "num_examples": 36850}, {"name": "val", "num_bytes": 7686278, "num_examples": 9213}], "download_size": 19877649, "dataset_size": 38439586}}
2023-10-27T15:06:14+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ds_rplanpy_category" More Information needed
[ "# Dataset Card for \"ds_rplanpy_category\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ds_rplanpy_category\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ds_rplanpy_category\"\n\nMore Information needed" ]
4b83ce1bdd5f5cfc4794dd4f10d2ad7ce5ea9467
# Dataset Card for "dataset-creator-reddit-amitheasshole" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) --- Generated Part of README Below --- ## Dataset Overview The goal is to have an open dataset of [r/amitheasshole](https://www.reddit.com/r/amitheasshole/) submissions. Im leveraging PRAW and the reddit API to get downloads. There is a limit of 1000 in an API call and limited search functionality, so this is run hourly to get new submissions. ## Creation Details This dataset was created by [derek-thomas/dataset-creator-reddit-amitheasshole](https://huggingface.co/spaces/derek-thomas/dataset-creator-reddit-amitheasshole) ## Update Frequency The dataset is updated hourly with the most recent update being `2023-12-04 12:00:00 UTC+0000` where we added **200 new rows**. ## Licensing [Reddit Licensing terms](https://www.redditinc.com/policies/data-api-terms) as accessed on October 25: > The Content created with or submitted to our Services by Users (“User Content”) is owned by Users and not by Reddit. Subject to your complete and ongoing compliance with the Data API Terms, Reddit grants you a non-exclusive, non-transferable, non-sublicensable, and revocable license to copy and display the User Content using the Data API solely as necessary to develop, deploy, distribute, and run your App to your App Users. You may not modify the User Content except to format it for such display. You will comply with any requirements or restrictions imposed on usage of User Content by their respective owners, which may include "all rights reserved" notices, Creative Commons licenses, or other terms and conditions that may be agreed upon between you and the owners. Except as expressly permitted by this section, no other rights or licenses are granted or implied, including any right to use User Content for other purposes, such as for training a machine learning or AI model, without the express permission of rightsholders in the applicable User Content My take is that you can't use this data for *training* without getting permission. ## Opt-out To opt-out of this dataset please make a request in the community tab
derek-thomas/dataset-creator-reddit-amitheasshole
[ "region:us" ]
2023-10-27T15:21:23+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "content", "dtype": "string"}, {"name": "poster", "dtype": "string"}, {"name": "date_utc", "dtype": "timestamp[ns]"}, {"name": "flair", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "permalink", "dtype": "string"}, {"name": "id", "dtype": "string"}, {"name": "content_length", "dtype": "int64"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 5358538, "num_examples": 2570}], "download_size": 3199986, "dataset_size": 5358538}}
2023-12-04T12:00:05+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dataset-creator-reddit-amitheasshole" More Information needed --- Generated Part of README Below --- ## Dataset Overview The goal is to have an open dataset of r/amitheasshole submissions. Im leveraging PRAW and the reddit API to get downloads. There is a limit of 1000 in an API call and limited search functionality, so this is run hourly to get new submissions. ## Creation Details This dataset was created by derek-thomas/dataset-creator-reddit-amitheasshole ## Update Frequency The dataset is updated hourly with the most recent update being '2023-12-04 12:00:00 UTC+0000' where we added 200 new rows. ## Licensing Reddit Licensing terms as accessed on October 25: > The Content created with or submitted to our Services by Users (“User Content”) is owned by Users and not by Reddit. Subject to your complete and ongoing compliance with the Data API Terms, Reddit grants you a non-exclusive, non-transferable, non-sublicensable, and revocable license to copy and display the User Content using the Data API solely as necessary to develop, deploy, distribute, and run your App to your App Users. You may not modify the User Content except to format it for such display. You will comply with any requirements or restrictions imposed on usage of User Content by their respective owners, which may include "all rights reserved" notices, Creative Commons licenses, or other terms and conditions that may be agreed upon between you and the owners. Except as expressly permitted by this section, no other rights or licenses are granted or implied, including any right to use User Content for other purposes, such as for training a machine learning or AI model, without the express permission of rightsholders in the applicable User Content My take is that you can't use this data for *training* without getting permission. ## Opt-out To opt-out of this dataset please make a request in the community tab
[ "# Dataset Card for \"dataset-creator-reddit-amitheasshole\"\n\nMore Information needed\n\n--- Generated Part of README Below ---", "## Dataset Overview\nThe goal is to have an open dataset of r/amitheasshole submissions. Im leveraging PRAW and the reddit API to get downloads.\n\nThere is a limit of 1000 in an API call and limited search functionality, so this is run hourly to get new submissions.", "## Creation Details\nThis dataset was created by derek-thomas/dataset-creator-reddit-amitheasshole", "## Update Frequency\nThe dataset is updated hourly with the most recent update being '2023-12-04 12:00:00 UTC+0000' where we added 200 new rows.", "## Licensing \nReddit Licensing terms as accessed on October 25:\n> The Content created with or submitted to our Services by Users (“User Content”) is owned by Users and not by Reddit. Subject to your complete and ongoing compliance with the Data API Terms, Reddit grants you a non-exclusive, non-transferable, non-sublicensable, and revocable license to copy and display the User Content using the Data API solely as necessary to develop, deploy, distribute, and run your App to your App Users. You may not modify the User Content except to format it for such display. You will comply with any requirements or restrictions imposed on usage of User Content by their respective owners, which may include \"all rights reserved\" notices, Creative Commons licenses, or other terms and conditions that may be agreed upon between you and the owners. Except as expressly permitted by this section, no other rights or licenses are granted or implied, including any right to use User Content for other purposes, such as for training a machine learning or AI model, without the express permission of rightsholders in the applicable User Content\n\nMy take is that you can't use this data for *training* without getting permission.", "## Opt-out\nTo opt-out of this dataset please make a request in the community tab" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dataset-creator-reddit-amitheasshole\"\n\nMore Information needed\n\n--- Generated Part of README Below ---", "## Dataset Overview\nThe goal is to have an open dataset of r/amitheasshole submissions. Im leveraging PRAW and the reddit API to get downloads.\n\nThere is a limit of 1000 in an API call and limited search functionality, so this is run hourly to get new submissions.", "## Creation Details\nThis dataset was created by derek-thomas/dataset-creator-reddit-amitheasshole", "## Update Frequency\nThe dataset is updated hourly with the most recent update being '2023-12-04 12:00:00 UTC+0000' where we added 200 new rows.", "## Licensing \nReddit Licensing terms as accessed on October 25:\n> The Content created with or submitted to our Services by Users (“User Content”) is owned by Users and not by Reddit. Subject to your complete and ongoing compliance with the Data API Terms, Reddit grants you a non-exclusive, non-transferable, non-sublicensable, and revocable license to copy and display the User Content using the Data API solely as necessary to develop, deploy, distribute, and run your App to your App Users. You may not modify the User Content except to format it for such display. You will comply with any requirements or restrictions imposed on usage of User Content by their respective owners, which may include \"all rights reserved\" notices, Creative Commons licenses, or other terms and conditions that may be agreed upon between you and the owners. Except as expressly permitted by this section, no other rights or licenses are granted or implied, including any right to use User Content for other purposes, such as for training a machine learning or AI model, without the express permission of rightsholders in the applicable User Content\n\nMy take is that you can't use this data for *training* without getting permission.", "## Opt-out\nTo opt-out of this dataset please make a request in the community tab" ]
[ 6, 34, 67, 30, 38, 268, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"dataset-creator-reddit-amitheasshole\"\n\nMore Information needed\n\n--- Generated Part of README Below ---## Dataset Overview\nThe goal is to have an open dataset of r/amitheasshole submissions. Im leveraging PRAW and the reddit API to get downloads.\n\nThere is a limit of 1000 in an API call and limited search functionality, so this is run hourly to get new submissions.## Creation Details\nThis dataset was created by derek-thomas/dataset-creator-reddit-amitheasshole## Update Frequency\nThe dataset is updated hourly with the most recent update being '2023-12-04 12:00:00 UTC+0000' where we added 200 new rows.## Licensing \nReddit Licensing terms as accessed on October 25:\n> The Content created with or submitted to our Services by Users (“User Content”) is owned by Users and not by Reddit. Subject to your complete and ongoing compliance with the Data API Terms, Reddit grants you a non-exclusive, non-transferable, non-sublicensable, and revocable license to copy and display the User Content using the Data API solely as necessary to develop, deploy, distribute, and run your App to your App Users. You may not modify the User Content except to format it for such display. You will comply with any requirements or restrictions imposed on usage of User Content by their respective owners, which may include \"all rights reserved\" notices, Creative Commons licenses, or other terms and conditions that may be agreed upon between you and the owners. Except as expressly permitted by this section, no other rights or licenses are granted or implied, including any right to use User Content for other purposes, such as for training a machine learning or AI model, without the express permission of rightsholders in the applicable User Content\n\nMy take is that you can't use this data for *training* without getting permission.## Opt-out\nTo opt-out of this dataset please make a request in the community tab" ]
4a6c20a9a9b8c479bf28486f6c72bd23be448c3b
# Aposemat IoT-23 - a Labeled Dataset with Malcious and Benign Iot Network Traffic **Homepage:** [https://www.stratosphereips.org/datasets-iot23](https://www.stratosphereips.org/datasets-iot23) This dataset contains a subset of the data from 20 captures of Malcious network traffic and 3 captures from live Benign Traffic on Internet of Things (IoT) devices. Created by Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga at the Avast AIC laboratory with the funding of Avast Software, this dataset is one of the best in the field for Intrusion Detection Systems (IDS) for IoT Devices [(Comparative Analysis of IoT Botnet Datasets)](https://doi.org/10.53070/bbd.1173687). The selection of the subset was determined by [Aqeel Ahmed on Kaggle](https://www.kaggle.com/datasets/engraqeel/iot23preprocesseddata) and contains 6 million samples. The Kaggle upload, nor this one, have employed data balancing. The Kaggle card does not contain methodology to understand what criteria was used to select these samples. If you want ensure best practice, use this dataset to mock-up processing the data into a model before using the full dataset with data balancing. This will require processing the 8GB of conn.log.labelled files. This dataset only notes if the data is Malcious or Benign. The original dataset labels the type of malcious traffic aswell. This means this processing of the dataset is only suited for binary classification. This dataset only contains the 6 most important columns: responder's port, transport layer protocol, connection state, number of packets sent by the originator, number of IP level bytes that the originator sent, number of IP level bytes that the responder sent, and the malicious or bengin label. These columns were determined from research done by Alani & Miri in "Towards an Explainable Universal Feature Set for IoT Intrusion Detection." They determined that these columns alone give 98% accuracy. This means a light weight model can contain these column alone and still have non-trivial results. This dataset only contains 2.5k rows, as the duplicates have been dropped. # Feature information: All features originate from the [Zeek](https://docs.zeek.org/en/master/scripts/base/protocols/conn/main.zeek.html#type-Conn::Info) processing performed by the dataset creators. [See notes here for caviats for each column](https://docs.zeek.org/en/master/scripts/base/protocols/conn/main.zeek.html#type-Conn::Info). <details> <summary>Expand for feature names, descriptions, and datatypes</summary> Name: id.resp_p Description: The responder’s port number. Data type: int64 - uint64 in original Name: proto Description: The transport layer protocol of the connection. Data type: string - enum(unknown_transport, tcp, udp, icmp). Only TCP and UDP in subset Name: conn_state Description: Value indicating connection state. (S0, S1, SF, REJ, S2, S3, RSTO, RSTR, RSTOS0, RSTRH, SH, SHR, OTH) Data type: optional string Name: orig_pkts Description: Number of packets that the originator sent. Data type: optional int64 - uint64 in original Name: orig_ip_bytes Description: Number of IP level bytes that the originator sent. Data type: optional int64 - uint64 in original Name: resp_ip_bytes Description: Number of IP level bytes that the responder sent. Data type: optional int64 - uint64 in original Name: label Description: Specifies if data point is benign or malicious. Data type: string - enum(Malicious, Benign) </details> ## Citation If you are using this dataset for your research, please reference it as “Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga. (2020). IoT-23: A labeled dataset with malicious and benign IoT network traffic (Version 1.0.0) [Data set]. Zenodo. http://doi.org/10.5281/zenodo.4743746”
19kmunz/iot-23-preprocessed-minimumcolumns
[ "region:us" ]
2023-10-27T15:26:59+00:00
{"dataset_info": {"features": [{"name": "id.resp_p", "dtype": "int64"}, {"name": "proto", "dtype": "string"}, {"name": "conn_state", "dtype": "string"}, {"name": "orig_pkts", "dtype": "int64"}, {"name": "orig_ip_bytes", "dtype": "int64"}, {"name": "resp_ip_bytes", "dtype": "int64"}, {"name": "label", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 132244, "num_examples": 2370}], "download_size": 0, "dataset_size": 132244}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-03T17:12:16+00:00
[]
[]
TAGS #region-us
# Aposemat IoT-23 - a Labeled Dataset with Malcious and Benign Iot Network Traffic Homepage: URL This dataset contains a subset of the data from 20 captures of Malcious network traffic and 3 captures from live Benign Traffic on Internet of Things (IoT) devices. Created by Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga at the Avast AIC laboratory with the funding of Avast Software, this dataset is one of the best in the field for Intrusion Detection Systems (IDS) for IoT Devices (Comparative Analysis of IoT Botnet Datasets). The selection of the subset was determined by Aqeel Ahmed on Kaggle and contains 6 million samples. The Kaggle upload, nor this one, have employed data balancing. The Kaggle card does not contain methodology to understand what criteria was used to select these samples. If you want ensure best practice, use this dataset to mock-up processing the data into a model before using the full dataset with data balancing. This will require processing the 8GB of URL.labelled files. This dataset only notes if the data is Malcious or Benign. The original dataset labels the type of malcious traffic aswell. This means this processing of the dataset is only suited for binary classification. This dataset only contains the 6 most important columns: responder's port, transport layer protocol, connection state, number of packets sent by the originator, number of IP level bytes that the originator sent, number of IP level bytes that the responder sent, and the malicious or bengin label. These columns were determined from research done by Alani & Miri in "Towards an Explainable Universal Feature Set for IoT Intrusion Detection." They determined that these columns alone give 98% accuracy. This means a light weight model can contain these column alone and still have non-trivial results. This dataset only contains 2.5k rows, as the duplicates have been dropped. # Feature information: All features originate from the Zeek processing performed by the dataset creators. See notes here for caviats for each column. <details> <summary>Expand for feature names, descriptions, and datatypes</summary> Name: id.resp_p Description: The responder’s port number. Data type: int64 - uint64 in original Name: proto Description: The transport layer protocol of the connection. Data type: string - enum(unknown_transport, tcp, udp, icmp). Only TCP and UDP in subset Name: conn_state Description: Value indicating connection state. (S0, S1, SF, REJ, S2, S3, RSTO, RSTR, RSTOS0, RSTRH, SH, SHR, OTH) Data type: optional string Name: orig_pkts Description: Number of packets that the originator sent. Data type: optional int64 - uint64 in original Name: orig_ip_bytes Description: Number of IP level bytes that the originator sent. Data type: optional int64 - uint64 in original Name: resp_ip_bytes Description: Number of IP level bytes that the responder sent. Data type: optional int64 - uint64 in original Name: label Description: Specifies if data point is benign or malicious. Data type: string - enum(Malicious, Benign) </details> If you are using this dataset for your research, please reference it as “Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga. (2020). IoT-23: A labeled dataset with malicious and benign IoT network traffic (Version 1.0.0) [Data set]. Zenodo. URL”
[ "# Aposemat IoT-23 - a Labeled Dataset with Malcious and Benign Iot Network Traffic \nHomepage: URL\n\nThis dataset contains a subset of the data from 20 captures of Malcious network traffic and 3 captures from live Benign Traffic on Internet of Things (IoT) devices. Created by Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga at the Avast AIC laboratory with the funding of Avast Software, this dataset is one of the best in the field for Intrusion Detection Systems (IDS) for IoT Devices (Comparative Analysis of IoT Botnet Datasets). \n\nThe selection of the subset was determined by Aqeel Ahmed on Kaggle and contains 6 million samples. The Kaggle upload, nor this one, have employed data balancing. The Kaggle card does not contain methodology to understand what criteria was used to select these samples. If you want ensure best practice, use this dataset to mock-up processing the data into a model before using the full dataset with data balancing. This will require processing the 8GB of URL.labelled files.\n\nThis dataset only notes if the data is Malcious or Benign. The original dataset labels the type of malcious traffic aswell. This means this processing of the dataset is only suited for binary classification.\n\nThis dataset only contains the 6 most important columns: responder's port, transport layer protocol, connection state, number of packets sent by the originator, number of IP level bytes that the originator sent, number of IP level bytes that the responder sent, and the malicious or bengin label. These columns were determined from research done by Alani & Miri in \"Towards an Explainable Universal Feature Set for IoT Intrusion Detection.\" They determined that these columns alone give 98% accuracy. This means a light weight model can contain these column alone and still have non-trivial results. This dataset only contains 2.5k rows, as the duplicates have been dropped.", "# Feature information:\n\nAll features originate from the Zeek processing performed by the dataset creators. See notes here for caviats for each column. \n<details>\n <summary>Expand for feature names, descriptions, and datatypes</summary>\n\nName: id.resp_p \nDescription: The responder’s port number. \nData type: int64 - uint64 in original \n\nName: proto \nDescription: The transport layer protocol of the connection. \nData type: string - enum(unknown_transport, tcp, udp, icmp). Only TCP and UDP in subset \n\nName: conn_state \nDescription: Value indicating connection state. (S0, S1, SF, REJ, S2, S3, RSTO, RSTR, RSTOS0, RSTRH, SH, SHR, OTH) \nData type: optional string \n\nName: orig_pkts \nDescription: Number of packets that the originator sent. \nData type: optional int64 - uint64 in original \n\nName: orig_ip_bytes \nDescription: Number of IP level bytes that the originator sent. \nData type: optional int64 - uint64 in original \n\nName: resp_ip_bytes \nDescription: Number of IP level bytes that the responder sent. \nData type: optional int64 - uint64 in original \n\nName: label \nDescription: Specifies if data point is benign or malicious. \nData type: string - enum(Malicious, Benign)\n\n</details>\n\nIf you are using this dataset for your research, please reference it as “Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga. (2020). IoT-23: A labeled dataset with malicious and benign IoT network traffic (Version 1.0.0) [Data set]. Zenodo. URL”" ]
[ "TAGS\n#region-us \n", "# Aposemat IoT-23 - a Labeled Dataset with Malcious and Benign Iot Network Traffic \nHomepage: URL\n\nThis dataset contains a subset of the data from 20 captures of Malcious network traffic and 3 captures from live Benign Traffic on Internet of Things (IoT) devices. Created by Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga at the Avast AIC laboratory with the funding of Avast Software, this dataset is one of the best in the field for Intrusion Detection Systems (IDS) for IoT Devices (Comparative Analysis of IoT Botnet Datasets). \n\nThe selection of the subset was determined by Aqeel Ahmed on Kaggle and contains 6 million samples. The Kaggle upload, nor this one, have employed data balancing. The Kaggle card does not contain methodology to understand what criteria was used to select these samples. If you want ensure best practice, use this dataset to mock-up processing the data into a model before using the full dataset with data balancing. This will require processing the 8GB of URL.labelled files.\n\nThis dataset only notes if the data is Malcious or Benign. The original dataset labels the type of malcious traffic aswell. This means this processing of the dataset is only suited for binary classification.\n\nThis dataset only contains the 6 most important columns: responder's port, transport layer protocol, connection state, number of packets sent by the originator, number of IP level bytes that the originator sent, number of IP level bytes that the responder sent, and the malicious or bengin label. These columns were determined from research done by Alani & Miri in \"Towards an Explainable Universal Feature Set for IoT Intrusion Detection.\" They determined that these columns alone give 98% accuracy. This means a light weight model can contain these column alone and still have non-trivial results. This dataset only contains 2.5k rows, as the duplicates have been dropped.", "# Feature information:\n\nAll features originate from the Zeek processing performed by the dataset creators. See notes here for caviats for each column. \n<details>\n <summary>Expand for feature names, descriptions, and datatypes</summary>\n\nName: id.resp_p \nDescription: The responder’s port number. \nData type: int64 - uint64 in original \n\nName: proto \nDescription: The transport layer protocol of the connection. \nData type: string - enum(unknown_transport, tcp, udp, icmp). Only TCP and UDP in subset \n\nName: conn_state \nDescription: Value indicating connection state. (S0, S1, SF, REJ, S2, S3, RSTO, RSTR, RSTOS0, RSTRH, SH, SHR, OTH) \nData type: optional string \n\nName: orig_pkts \nDescription: Number of packets that the originator sent. \nData type: optional int64 - uint64 in original \n\nName: orig_ip_bytes \nDescription: Number of IP level bytes that the originator sent. \nData type: optional int64 - uint64 in original \n\nName: resp_ip_bytes \nDescription: Number of IP level bytes that the responder sent. \nData type: optional int64 - uint64 in original \n\nName: label \nDescription: Specifies if data point is benign or malicious. \nData type: string - enum(Malicious, Benign)\n\n</details>\n\nIf you are using this dataset for your research, please reference it as “Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga. (2020). IoT-23: A labeled dataset with malicious and benign IoT network traffic (Version 1.0.0) [Data set]. Zenodo. URL”" ]
[ 6, 467, 408 ]
[ "passage: TAGS\n#region-us \n# Aposemat IoT-23 - a Labeled Dataset with Malcious and Benign Iot Network Traffic \nHomepage: URL\n\nThis dataset contains a subset of the data from 20 captures of Malcious network traffic and 3 captures from live Benign Traffic on Internet of Things (IoT) devices. Created by Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga at the Avast AIC laboratory with the funding of Avast Software, this dataset is one of the best in the field for Intrusion Detection Systems (IDS) for IoT Devices (Comparative Analysis of IoT Botnet Datasets). \n\nThe selection of the subset was determined by Aqeel Ahmed on Kaggle and contains 6 million samples. The Kaggle upload, nor this one, have employed data balancing. The Kaggle card does not contain methodology to understand what criteria was used to select these samples. If you want ensure best practice, use this dataset to mock-up processing the data into a model before using the full dataset with data balancing. This will require processing the 8GB of URL.labelled files.\n\nThis dataset only notes if the data is Malcious or Benign. The original dataset labels the type of malcious traffic aswell. This means this processing of the dataset is only suited for binary classification.\n\nThis dataset only contains the 6 most important columns: responder's port, transport layer protocol, connection state, number of packets sent by the originator, number of IP level bytes that the originator sent, number of IP level bytes that the responder sent, and the malicious or bengin label. These columns were determined from research done by Alani & Miri in \"Towards an Explainable Universal Feature Set for IoT Intrusion Detection.\" They determined that these columns alone give 98% accuracy. This means a light weight model can contain these column alone and still have non-trivial results. This dataset only contains 2.5k rows, as the duplicates have been dropped." ]
327bfab0b30d966c2de2743a26e142e2061504ab
# Dataset Card for "toy-tla-data" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
aneeshas/toy-tla-data
[ "region:us" ]
2023-10-27T15:29:41+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 83501, "num_examples": 50}, {"name": "test", "num_bytes": 28275, "num_examples": 20}], "download_size": 86160, "dataset_size": 111776}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]}
2023-10-27T16:06:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "toy-tla-data" More Information needed
[ "# Dataset Card for \"toy-tla-data\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"toy-tla-data\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"toy-tla-data\"\n\nMore Information needed" ]
8ea0aff660d0ae8c9ec9d8a3ae6be46e5c808cdf
# Dataset Card for BookCorpus ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [https://tahrirchi.uz/grammatika-tekshiruvi](https://tahrirchi.uz/grammatika-tekshiruvi) - **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Paper:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Size of downloaded dataset files:** 16.98 GB - **Size of the generated dataset:** 32.95 GB - **Total amount of disk used:** 49.93 GB ### Dataset Summary In an effort to democratize research on low-resource languages, we release UzBooks dataset, a cleaned book corpus consisting of nearly 40000 books in Uzbek Language divided into two branches: "original" and "lat," representing the OCRed (Latin and Cyrillic) and fully Latin versions of the texts, respectively. Please refer to our [blogpost](https://tahrirchi.uz/grammatika-tekshiruvi) and paper (Coming soon!) for further details. To load and use dataset, run this script: ```python from datasets import load_dataset uz_books=load_dataset("tahrirchi/uz-books") ``` ## Dataset Structure ### Data Instances #### plain_text - **Size of downloaded dataset files:** 16.98 GB - **Size of the generated dataset:** 32.95 GB - **Total amount of disk used:** 49.93 GB An example of 'train' looks as follows. ``` { "text": "Hamsa\nAlisher Navoiy ..." } ``` ### Data Fields The data fields are the same among all splits. #### plain_text - `text`: a `string` feature that contains text of the books. ### Data Splits | name | | |-----------------|--------:| | original | 39712 | | lat | 39712 | ## Dataset Creation The books have been crawled from various internet sources and preprocessed using Optical Character Recognition techniques in [Tesseract OCR Engine](https://github.com/tesseract-ocr/tesseract). The latin version is created by converting the original dataset with highly curated scripts in order to put more emphasis on the research and development of the field. ## Citation Please cite this model using the following format: ``` @online{Mamasaidov2023UzBooks, author = {Mukhammadsaid Mamasaidov and Abror Shopulatov}, title = {UzBooks dataset}, year = {2023}, url = {https://huggingface.co/datasets/tahrirchi/uz-books}, note = {Accessed: 2023-10-28}, % change this date urldate = {2023-10-28} % change this date } ``` ## Gratitude We are thankful to these awesome organizations and people for helping to make it happen: - [Ilya Gusev](https://github.com/IlyaGusev/): for advise throughout the process - [David Dale](https://daviddale.ru): for advise throughout the process ## Contacts We believe that this work will enable and inspire all enthusiasts around the world to open the hidden beauty of low-resource languages, in particular Uzbek. For further development and issues about the dataset, please use [email protected] or [email protected] to contact.
tahrirchi/uz-books
[ "task_categories:text-generation", "task_categories:fill-mask", "task_ids:language-modeling", "task_ids:masked-language-modeling", "annotations_creators:no-annotation", "multilinguality:monolingual", "size_categories:10M<n<100M", "language:uz", "license:apache-2.0", "uz", "books", "region:us" ]
2023-10-27T15:35:16+00:00
{"annotations_creators": ["no-annotation"], "language": ["uz"], "license": "apache-2.0", "multilinguality": ["monolingual"], "size_categories": ["10M<n<100M"], "task_categories": ["text-generation", "fill-mask"], "task_ids": ["language-modeling", "masked-language-modeling"], "pretty_name": "UzBooks", "configs": [{"config_name": "default", "data_files": [{"split": "original", "path": "data/original-*"}, {"split": "lat", "path": "data/lat-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "original", "num_bytes": 19244856855, "num_examples": 39712}, {"name": "lat", "num_bytes": 13705512346, "num_examples": 39712}], "download_size": 16984559355, "dataset_size": 32950369201}, "tags": ["uz", "books"]}
2023-10-28T18:11:13+00:00
[]
[ "uz" ]
TAGS #task_categories-text-generation #task_categories-fill-mask #task_ids-language-modeling #task_ids-masked-language-modeling #annotations_creators-no-annotation #multilinguality-monolingual #size_categories-10M<n<100M #language-Uzbek #license-apache-2.0 #uz #books #region-us
Dataset Card for BookCorpus =========================== Table of Contents ----------------- * Dataset Description + Dataset Summary + Supported Tasks and Leaderboards + Languages * Dataset Structure + Data Instances + Data Fields + Data Splits * Dataset Creation + Curation Rationale + Source Data + Annotations + Personal and Sensitive Information * Considerations for Using the Data + Social Impact of Dataset + Discussion of Biases + Other Known Limitations * Additional Information + Dataset Curators + Licensing Information + Citation Information + Contributions Dataset Description ------------------- * Homepage: URL * Repository: * Paper: * Point of Contact: * Size of downloaded dataset files: 16.98 GB * Size of the generated dataset: 32.95 GB * Total amount of disk used: 49.93 GB ### Dataset Summary In an effort to democratize research on low-resource languages, we release UzBooks dataset, a cleaned book corpus consisting of nearly 40000 books in Uzbek Language divided into two branches: "original" and "lat," representing the OCRed (Latin and Cyrillic) and fully Latin versions of the texts, respectively. Please refer to our blogpost and paper (Coming soon!) for further details. To load and use dataset, run this script: Dataset Structure ----------------- ### Data Instances #### plain\_text * Size of downloaded dataset files: 16.98 GB * Size of the generated dataset: 32.95 GB * Total amount of disk used: 49.93 GB An example of 'train' looks as follows. ### Data Fields The data fields are the same among all splits. #### plain\_text * 'text': a 'string' feature that contains text of the books. ### Data Splits Dataset Creation ---------------- The books have been crawled from various internet sources and preprocessed using Optical Character Recognition techniques in Tesseract OCR Engine. The latin version is created by converting the original dataset with highly curated scripts in order to put more emphasis on the research and development of the field. Please cite this model using the following format: Gratitude --------- We are thankful to these awesome organizations and people for helping to make it happen: * Ilya Gusev: for advise throughout the process * David Dale: for advise throughout the process Contacts -------- We believe that this work will enable and inspire all enthusiasts around the world to open the hidden beauty of low-resource languages, in particular Uzbek. For further development and issues about the dataset, please use m.mamasaidov@URL or a.shopolatov@URL to contact.
[ "### Dataset Summary\n\n\nIn an effort to democratize research on low-resource languages, we release UzBooks dataset, a cleaned book corpus consisting of nearly 40000 books in Uzbek Language divided into two branches: \"original\" and \"lat,\" representing the OCRed (Latin and Cyrillic) and fully Latin versions of the texts, respectively.\n\n\nPlease refer to our blogpost and paper (Coming soon!) for further details.\n\n\nTo load and use dataset, run this script:\n\n\nDataset Structure\n-----------------", "### Data Instances", "#### plain\\_text\n\n\n* Size of downloaded dataset files: 16.98 GB\n* Size of the generated dataset: 32.95 GB\n* Total amount of disk used: 49.93 GB\n\n\nAn example of 'train' looks as follows.", "### Data Fields\n\n\nThe data fields are the same among all splits.", "#### plain\\_text\n\n\n* 'text': a 'string' feature that contains text of the books.", "### Data Splits\n\n\n\nDataset Creation\n----------------\n\n\nThe books have been crawled from various internet sources and preprocessed using Optical Character Recognition techniques in Tesseract OCR Engine. The latin version is created by converting the original dataset with highly curated scripts in order to put more emphasis on the research and development of the field.\n\n\nPlease cite this model using the following format:\n\n\nGratitude\n---------\n\n\nWe are thankful to these awesome organizations and people for helping to make it happen:\n\n\n* Ilya Gusev: for advise throughout the process\n* David Dale: for advise throughout the process\n\n\nContacts\n--------\n\n\nWe believe that this work will enable and inspire all enthusiasts around the world to open the hidden beauty of low-resource languages, in particular Uzbek.\n\n\nFor further development and issues about the dataset, please use m.mamasaidov@URL or a.shopolatov@URL to contact." ]
[ "TAGS\n#task_categories-text-generation #task_categories-fill-mask #task_ids-language-modeling #task_ids-masked-language-modeling #annotations_creators-no-annotation #multilinguality-monolingual #size_categories-10M<n<100M #language-Uzbek #license-apache-2.0 #uz #books #region-us \n", "### Dataset Summary\n\n\nIn an effort to democratize research on low-resource languages, we release UzBooks dataset, a cleaned book corpus consisting of nearly 40000 books in Uzbek Language divided into two branches: \"original\" and \"lat,\" representing the OCRed (Latin and Cyrillic) and fully Latin versions of the texts, respectively.\n\n\nPlease refer to our blogpost and paper (Coming soon!) for further details.\n\n\nTo load and use dataset, run this script:\n\n\nDataset Structure\n-----------------", "### Data Instances", "#### plain\\_text\n\n\n* Size of downloaded dataset files: 16.98 GB\n* Size of the generated dataset: 32.95 GB\n* Total amount of disk used: 49.93 GB\n\n\nAn example of 'train' looks as follows.", "### Data Fields\n\n\nThe data fields are the same among all splits.", "#### plain\\_text\n\n\n* 'text': a 'string' feature that contains text of the books.", "### Data Splits\n\n\n\nDataset Creation\n----------------\n\n\nThe books have been crawled from various internet sources and preprocessed using Optical Character Recognition techniques in Tesseract OCR Engine. The latin version is created by converting the original dataset with highly curated scripts in order to put more emphasis on the research and development of the field.\n\n\nPlease cite this model using the following format:\n\n\nGratitude\n---------\n\n\nWe are thankful to these awesome organizations and people for helping to make it happen:\n\n\n* Ilya Gusev: for advise throughout the process\n* David Dale: for advise throughout the process\n\n\nContacts\n--------\n\n\nWe believe that this work will enable and inspire all enthusiasts around the world to open the hidden beauty of low-resource languages, in particular Uzbek.\n\n\nFor further development and issues about the dataset, please use m.mamasaidov@URL or a.shopolatov@URL to contact." ]
[ 101, 121, 6, 53, 17, 24, 198 ]
[ "passage: TAGS\n#task_categories-text-generation #task_categories-fill-mask #task_ids-language-modeling #task_ids-masked-language-modeling #annotations_creators-no-annotation #multilinguality-monolingual #size_categories-10M<n<100M #language-Uzbek #license-apache-2.0 #uz #books #region-us \n### Dataset Summary\n\n\nIn an effort to democratize research on low-resource languages, we release UzBooks dataset, a cleaned book corpus consisting of nearly 40000 books in Uzbek Language divided into two branches: \"original\" and \"lat,\" representing the OCRed (Latin and Cyrillic) and fully Latin versions of the texts, respectively.\n\n\nPlease refer to our blogpost and paper (Coming soon!) for further details.\n\n\nTo load and use dataset, run this script:\n\n\nDataset Structure\n-----------------### Data Instances#### plain\\_text\n\n\n* Size of downloaded dataset files: 16.98 GB\n* Size of the generated dataset: 32.95 GB\n* Total amount of disk used: 49.93 GB\n\n\nAn example of 'train' looks as follows.### Data Fields\n\n\nThe data fields are the same among all splits.#### plain\\_text\n\n\n* 'text': a 'string' feature that contains text of the books." ]
088f8eea135a8c852d6efef2c8f430d2743f4bfb
# Aposemat IoT-23 - a Labeled Dataset with Malcious and Benign Iot Network Traffic **Homepage:** [https://www.stratosphereips.org/datasets-iot23](https://www.stratosphereips.org/datasets-iot23) This dataset contains a subset of the data from 20 captures of Malcious network traffic and 3 captures from live Benign Traffic on Internet of Things (IoT) devices. Created by Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga at the Avast AIC laboratory with the funding of Avast Software, this dataset is one of the best in the field for Intrusion Detection Systems (IDS) for IoT Devices [(Comparative Analysis of IoT Botnet Datasets)](https://doi.org/10.53070/bbd.1173687). The selection of the subset was determined by [Aqeel Ahmed on Kaggle](https://www.kaggle.com/datasets/engraqeel/iot23preprocesseddata) and contains 6 million samples. The Kaggle upload, nor this one, have employed data balancing. The Kaggle card does not contain methodology to understand what criteria was used to select these samples. If you want ensure best practice, use this dataset to mock-up processing the data into a model before using the full dataset with data balancing. This will require processing the 8GB of conn.log.labelled files. This dataset only notes if the data is Malcious or Benign. The original dataset labels the type of malcious traffic aswell. This means this processing of the dataset is only suited for binary classification. # Feature information: All features originate from the [Zeek](https://docs.zeek.org/en/master/scripts/base/protocols/conn/main.zeek.html#type-Conn::Info) processing performed by the dataset creators. [See notes here for caviats for each column](https://docs.zeek.org/en/master/scripts/base/protocols/conn/main.zeek.html#type-Conn::Info). <details> <summary>Expand for feature names, descriptions, and datatypes</summary> Name: id.orig_p Description: The originator’s port number. Data type: int64 - uint64 in original Name: id.resp_p Description: The responder’s port number. Data type: int64 - uint64 in original Name: proto Description: The transport layer protocol of the connection. Data type: string - enum(unknown_transport, tcp, udp, icmp). Only TCP and UDP in subset Name: service Description: An identification of an application protocol being sent over the connection. Data type: optional string Name: duration Description: How long the connection lasted. Data type: optional float64 - time interval Name: orig_bytes Description: The number of payload bytes the originator sent. Data type: optional int64 - uint64 in original Name: resp_bytes Description:The number of payload bytes the responder sent. Data type: optional int64 - uint64 in original Name: conn_state Description: Value indicating connection state. (S0, S1, SF, REJ, S2, S3, RSTO, RSTR, RSTOS0, RSTRH, SH, SHR, OTH) Data type: optional string Name: missed_bytes Description: Indicates the number of bytes missed in content gaps, which is representative of packet loss. Data type: optional int64 - uint64 in original. default = 0 Name: history Description: Records the state history of connections as a string of letters. Data type: optional string Name: orig_pkts Description: Number of packets that the originator sent. Data type: optional int64 - uint64 in original Name: orig_ip_bytes Description: Number of IP level bytes that the originator sent. Data type: optional int64 - uint64 in original Name: resp_pkts Description: Number of packets that the responder sent. Data type: optional int64 - uint64 in original Name: resp_ip_bytes Description: Number of IP level bytes that the responder sent. Data type: optional int64 - uint64 in original Name: label Description: Specifies if data point is benign or some form of malicious. See the dataset creators paper for descriptions of attack types Data type: string - enum(Malicious, Benign) NOTE: ts, uid, id.orig_h, id.resp_h have been removed as they are dataset specific. Models should not be trained with specific timestamps or IP addresses (id.orig_h) using this dataset, as that can lead to over fitting to dataset specific times and addresses. Further local_orig, local_resp have been removed as they are null in all rows, so they are useless for training. </details> ## Citation If you are using this dataset for your research, please reference it as “Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga. (2020). IoT-23: A labeled dataset with malicious and benign IoT network traffic (Version 1.0.0) [Data set]. Zenodo. http://doi.org/10.5281/zenodo.4743746”
19kmunz/iot-23-preprocessed
[ "task_categories:question-answering", "task_categories:tabular-classification", "language:en", "code", "region:us" ]
2023-10-27T15:39:01+00:00
{"language": ["en"], "task_categories": ["question-answering", "tabular-classification"], "pretty_name": "d", "dataset_info": {"features": [{"name": "id.orig_p", "dtype": "int64"}, {"name": "id.resp_p", "dtype": "int64"}, {"name": "proto", "dtype": "string"}, {"name": "service", "dtype": "string"}, {"name": "duration", "dtype": "float64"}, {"name": "orig_bytes", "dtype": "int64"}, {"name": "resp_bytes", "dtype": "int64"}, {"name": "conn_state", "dtype": "string"}, {"name": "missed_bytes", "dtype": "int64"}, {"name": "history", "dtype": "string"}, {"name": "orig_pkts", "dtype": "int64"}, {"name": "orig_ip_bytes", "dtype": "int64"}, {"name": "resp_pkts", "dtype": "int64"}, {"name": "resp_ip_bytes", "dtype": "int64"}, {"name": "label", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 93994789, "num_examples": 819024}], "download_size": 11805369, "dataset_size": 93994789}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "tags": ["code"]}
2023-11-03T17:12:47+00:00
[]
[ "en" ]
TAGS #task_categories-question-answering #task_categories-tabular-classification #language-English #code #region-us
# Aposemat IoT-23 - a Labeled Dataset with Malcious and Benign Iot Network Traffic Homepage: URL This dataset contains a subset of the data from 20 captures of Malcious network traffic and 3 captures from live Benign Traffic on Internet of Things (IoT) devices. Created by Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga at the Avast AIC laboratory with the funding of Avast Software, this dataset is one of the best in the field for Intrusion Detection Systems (IDS) for IoT Devices (Comparative Analysis of IoT Botnet Datasets). The selection of the subset was determined by Aqeel Ahmed on Kaggle and contains 6 million samples. The Kaggle upload, nor this one, have employed data balancing. The Kaggle card does not contain methodology to understand what criteria was used to select these samples. If you want ensure best practice, use this dataset to mock-up processing the data into a model before using the full dataset with data balancing. This will require processing the 8GB of URL.labelled files. This dataset only notes if the data is Malcious or Benign. The original dataset labels the type of malcious traffic aswell. This means this processing of the dataset is only suited for binary classification. # Feature information: All features originate from the Zeek processing performed by the dataset creators. See notes here for caviats for each column. <details> <summary>Expand for feature names, descriptions, and datatypes</summary> Name: id.orig_p Description: The originator’s port number. Data type: int64 - uint64 in original Name: id.resp_p Description: The responder’s port number. Data type: int64 - uint64 in original Name: proto Description: The transport layer protocol of the connection. Data type: string - enum(unknown_transport, tcp, udp, icmp). Only TCP and UDP in subset Name: service Description: An identification of an application protocol being sent over the connection. Data type: optional string Name: duration Description: How long the connection lasted. Data type: optional float64 - time interval Name: orig_bytes Description: The number of payload bytes the originator sent. Data type: optional int64 - uint64 in original Name: resp_bytes Description:The number of payload bytes the responder sent. Data type: optional int64 - uint64 in original Name: conn_state Description: Value indicating connection state. (S0, S1, SF, REJ, S2, S3, RSTO, RSTR, RSTOS0, RSTRH, SH, SHR, OTH) Data type: optional string Name: missed_bytes Description: Indicates the number of bytes missed in content gaps, which is representative of packet loss. Data type: optional int64 - uint64 in original. default = 0 Name: history Description: Records the state history of connections as a string of letters. Data type: optional string Name: orig_pkts Description: Number of packets that the originator sent. Data type: optional int64 - uint64 in original Name: orig_ip_bytes Description: Number of IP level bytes that the originator sent. Data type: optional int64 - uint64 in original Name: resp_pkts Description: Number of packets that the responder sent. Data type: optional int64 - uint64 in original Name: resp_ip_bytes Description: Number of IP level bytes that the responder sent. Data type: optional int64 - uint64 in original Name: label Description: Specifies if data point is benign or some form of malicious. See the dataset creators paper for descriptions of attack types Data type: string - enum(Malicious, Benign) NOTE: ts, uid, id.orig_h, id.resp_h have been removed as they are dataset specific. Models should not be trained with specific timestamps or IP addresses (id.orig_h) using this dataset, as that can lead to over fitting to dataset specific times and addresses. Further local_orig, local_resp have been removed as they are null in all rows, so they are useless for training. </details> If you are using this dataset for your research, please reference it as “Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga. (2020). IoT-23: A labeled dataset with malicious and benign IoT network traffic (Version 1.0.0) [Data set]. Zenodo. URL”
[ "# Aposemat IoT-23 - a Labeled Dataset with Malcious and Benign Iot Network Traffic \nHomepage: URL\n\nThis dataset contains a subset of the data from 20 captures of Malcious network traffic and 3 captures from live Benign Traffic on Internet of Things (IoT) devices. Created by Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga at the Avast AIC laboratory with the funding of Avast Software, this dataset is one of the best in the field for Intrusion Detection Systems (IDS) for IoT Devices (Comparative Analysis of IoT Botnet Datasets). \n\nThe selection of the subset was determined by Aqeel Ahmed on Kaggle and contains 6 million samples. The Kaggle upload, nor this one, have employed data balancing. The Kaggle card does not contain methodology to understand what criteria was used to select these samples. If you want ensure best practice, use this dataset to mock-up processing the data into a model before using the full dataset with data balancing. This will require processing the 8GB of URL.labelled files.\n\nThis dataset only notes if the data is Malcious or Benign. The original dataset labels the type of malcious traffic aswell. This means this processing of the dataset is only suited for binary classification.", "# Feature information:\n\nAll features originate from the Zeek processing performed by the dataset creators. See notes here for caviats for each column. \n<details>\n <summary>Expand for feature names, descriptions, and datatypes</summary>\n\nName: id.orig_p \nDescription: The originator’s port number. \nData type: int64 - uint64 in original \n\nName: id.resp_p \nDescription: The responder’s port number. \nData type: int64 - uint64 in original \n\nName: proto \nDescription: The transport layer protocol of the connection. \nData type: string - enum(unknown_transport, tcp, udp, icmp). Only TCP and UDP in subset \n\nName: service \nDescription: An identification of an application protocol being sent over the connection. \nData type: optional string \n\nName: duration \nDescription: How long the connection lasted. \nData type: optional float64 - time interval \n\nName: orig_bytes \nDescription: The number of payload bytes the originator sent. \nData type: optional int64 - uint64 in original \n\nName: resp_bytes \nDescription:The number of payload bytes the responder sent. \nData type: optional int64 - uint64 in original \n\nName: conn_state \nDescription: Value indicating connection state. (S0, S1, SF, REJ, S2, S3, RSTO, RSTR, RSTOS0, RSTRH, SH, SHR, OTH) \nData type: optional string \n\nName: missed_bytes \nDescription: Indicates the number of bytes missed in content gaps, which is representative of packet loss. \nData type: optional int64 - uint64 in original. default = 0\n\nName: history \nDescription: Records the state history of connections as a string of letters. \nData type: optional string \n\nName: orig_pkts \nDescription: Number of packets that the originator sent. \nData type: optional int64 - uint64 in original \n\nName: orig_ip_bytes \nDescription: Number of IP level bytes that the originator sent. \nData type: optional int64 - uint64 in original \n\nName: resp_pkts \nDescription: Number of packets that the responder sent. \nData type: optional int64 - uint64 in original \n\nName: resp_ip_bytes \nDescription: Number of IP level bytes that the responder sent. \nData type: optional int64 - uint64 in original \n\nName: label \nDescription: Specifies if data point is benign or some form of malicious. See the dataset creators paper for descriptions of attack types \nData type: string - enum(Malicious, Benign)\n\nNOTE: ts, uid, id.orig_h, id.resp_h have been removed as they are dataset specific. Models should not be trained with specific timestamps or IP addresses (id.orig_h) using this dataset, as that can lead to over fitting to dataset specific times and addresses. \nFurther local_orig, local_resp have been removed as they are null in all rows, so they are useless for training.\n</details>\n\nIf you are using this dataset for your research, please reference it as “Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga. (2020). IoT-23: A labeled dataset with malicious and benign IoT network traffic (Version 1.0.0) [Data set]. Zenodo. URL”" ]
[ "TAGS\n#task_categories-question-answering #task_categories-tabular-classification #language-English #code #region-us \n", "# Aposemat IoT-23 - a Labeled Dataset with Malcious and Benign Iot Network Traffic \nHomepage: URL\n\nThis dataset contains a subset of the data from 20 captures of Malcious network traffic and 3 captures from live Benign Traffic on Internet of Things (IoT) devices. Created by Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga at the Avast AIC laboratory with the funding of Avast Software, this dataset is one of the best in the field for Intrusion Detection Systems (IDS) for IoT Devices (Comparative Analysis of IoT Botnet Datasets). \n\nThe selection of the subset was determined by Aqeel Ahmed on Kaggle and contains 6 million samples. The Kaggle upload, nor this one, have employed data balancing. The Kaggle card does not contain methodology to understand what criteria was used to select these samples. If you want ensure best practice, use this dataset to mock-up processing the data into a model before using the full dataset with data balancing. This will require processing the 8GB of URL.labelled files.\n\nThis dataset only notes if the data is Malcious or Benign. The original dataset labels the type of malcious traffic aswell. This means this processing of the dataset is only suited for binary classification.", "# Feature information:\n\nAll features originate from the Zeek processing performed by the dataset creators. See notes here for caviats for each column. \n<details>\n <summary>Expand for feature names, descriptions, and datatypes</summary>\n\nName: id.orig_p \nDescription: The originator’s port number. \nData type: int64 - uint64 in original \n\nName: id.resp_p \nDescription: The responder’s port number. \nData type: int64 - uint64 in original \n\nName: proto \nDescription: The transport layer protocol of the connection. \nData type: string - enum(unknown_transport, tcp, udp, icmp). Only TCP and UDP in subset \n\nName: service \nDescription: An identification of an application protocol being sent over the connection. \nData type: optional string \n\nName: duration \nDescription: How long the connection lasted. \nData type: optional float64 - time interval \n\nName: orig_bytes \nDescription: The number of payload bytes the originator sent. \nData type: optional int64 - uint64 in original \n\nName: resp_bytes \nDescription:The number of payload bytes the responder sent. \nData type: optional int64 - uint64 in original \n\nName: conn_state \nDescription: Value indicating connection state. (S0, S1, SF, REJ, S2, S3, RSTO, RSTR, RSTOS0, RSTRH, SH, SHR, OTH) \nData type: optional string \n\nName: missed_bytes \nDescription: Indicates the number of bytes missed in content gaps, which is representative of packet loss. \nData type: optional int64 - uint64 in original. default = 0\n\nName: history \nDescription: Records the state history of connections as a string of letters. \nData type: optional string \n\nName: orig_pkts \nDescription: Number of packets that the originator sent. \nData type: optional int64 - uint64 in original \n\nName: orig_ip_bytes \nDescription: Number of IP level bytes that the originator sent. \nData type: optional int64 - uint64 in original \n\nName: resp_pkts \nDescription: Number of packets that the responder sent. \nData type: optional int64 - uint64 in original \n\nName: resp_ip_bytes \nDescription: Number of IP level bytes that the responder sent. \nData type: optional int64 - uint64 in original \n\nName: label \nDescription: Specifies if data point is benign or some form of malicious. See the dataset creators paper for descriptions of attack types \nData type: string - enum(Malicious, Benign)\n\nNOTE: ts, uid, id.orig_h, id.resp_h have been removed as they are dataset specific. Models should not be trained with specific timestamps or IP addresses (id.orig_h) using this dataset, as that can lead to over fitting to dataset specific times and addresses. \nFurther local_orig, local_resp have been removed as they are null in all rows, so they are useless for training.\n</details>\n\nIf you are using this dataset for your research, please reference it as “Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga. (2020). IoT-23: A labeled dataset with malicious and benign IoT network traffic (Version 1.0.0) [Data set]. Zenodo. URL”" ]
[ 36, 303, 781 ]
[ "passage: TAGS\n#task_categories-question-answering #task_categories-tabular-classification #language-English #code #region-us \n# Aposemat IoT-23 - a Labeled Dataset with Malcious and Benign Iot Network Traffic \nHomepage: URL\n\nThis dataset contains a subset of the data from 20 captures of Malcious network traffic and 3 captures from live Benign Traffic on Internet of Things (IoT) devices. Created by Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga at the Avast AIC laboratory with the funding of Avast Software, this dataset is one of the best in the field for Intrusion Detection Systems (IDS) for IoT Devices (Comparative Analysis of IoT Botnet Datasets). \n\nThe selection of the subset was determined by Aqeel Ahmed on Kaggle and contains 6 million samples. The Kaggle upload, nor this one, have employed data balancing. The Kaggle card does not contain methodology to understand what criteria was used to select these samples. If you want ensure best practice, use this dataset to mock-up processing the data into a model before using the full dataset with data balancing. This will require processing the 8GB of URL.labelled files.\n\nThis dataset only notes if the data is Malcious or Benign. The original dataset labels the type of malcious traffic aswell. This means this processing of the dataset is only suited for binary classification." ]
b9600ab04bc220107957f1539b4a34f40419dac2
# Dataset Card for "tts_male" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
linhtran92/tts_male
[ "region:us" ]
2023-10-27T15:48:07+00:00
{"dataset_info": {"features": [{"name": "sentence_norm", "dtype": "string"}, {"name": "audio", "struct": [{"name": "array", "sequence": "int64"}, {"name": "path", "dtype": "string"}, {"name": "sampling_rate", "dtype": "int64"}]}, {"name": "wer", "dtype": "int64"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 222336754, "num_examples": 499}], "download_size": 45628084, "dataset_size": 222336754}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-27T15:48:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for "tts_male" More Information needed
[ "# Dataset Card for \"tts_male\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"tts_male\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"tts_male\"\n\nMore Information needed" ]
31e5338c0256a86b90113e91a6622755b8a8e6d2
# Dataset Card for "soict_train_dataset_filter_v2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
thanhduycao/soict_train_dataset_filter_v2
[ "region:us" ]
2023-10-27T15:52:45+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "audio", "struct": [{"name": "array", "sequence": "float64"}, {"name": "path", "dtype": "string"}, {"name": "sampling_rate", "dtype": "int64"}]}, {"name": "sentence_norm", "dtype": "string"}, {"name": "wer", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 3226357801, "num_examples": 6184}, {"name": "test", "num_bytes": 565495055, "num_examples": 1092}], "download_size": 900623742, "dataset_size": 3791852856}}
2023-10-27T15:53:41+00:00
[]
[]
TAGS #region-us
# Dataset Card for "soict_train_dataset_filter_v2" More Information needed
[ "# Dataset Card for \"soict_train_dataset_filter_v2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"soict_train_dataset_filter_v2\"\n\nMore Information needed" ]
[ 6, 24 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"soict_train_dataset_filter_v2\"\n\nMore Information needed" ]
04de595b618478668d5974a861a9f4f7305814b1
# Aposemat IoT-23 - a Labeled Dataset with Malcious and Benign Iot Network Traffic **Homepage:** [https://www.stratosphereips.org/datasets-iot23](https://www.stratosphereips.org/datasets-iot23) This dataset contains a subset of the data from 20 captures of Malcious network traffic and 3 captures from live Benign Traffic on Internet of Things (IoT) devices. Created by Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga at the Avast AIC laboratory with the funding of Avast Software, this dataset is one of the best in the field for Intrusion Detection Systems (IDS) for IoT Devices [(Comparative Analysis of IoT Botnet Datasets)](https://doi.org/10.53070/bbd.1173687). The selection of the subset was determined by [Aqeel Ahmed on Kaggle](https://www.kaggle.com/datasets/engraqeel/iot23preprocesseddata) and contains 6 million samples. The Kaggle upload, nor this one, have employed data balancing. The Kaggle card does not contain methodology to understand what criteria was used to select these samples. If you want ensure best practice, use this dataset to mock-up processing the data into a model before using the full dataset with data balancing. This will require processing the 8GB of conn.log.labelled files. # Feature information: All features originate from the [Zeek](https://docs.zeek.org/en/master/scripts/base/protocols/conn/main.zeek.html#type-Conn::Info) processing performed by the dataset creators. [See notes here for caviats for each column](https://docs.zeek.org/en/master/scripts/base/protocols/conn/main.zeek.html#type-Conn::Info). <details> <summary>Expand for feature names, descriptions, and datatypes</summary> Name: ts Desription: This is the time of the first packet. Data Type: float64 - Timestamp Name: uid Description: A Zeek-defined unique identifier of the connection. Data type: string Name: id.orig_h Description: The originator’s IP address. Data type: string - for the form 255.255.255.255 for IPv4 or [aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222] for IPv6 Name: id.orig_p Description: The originator’s port number. Data type: int64 - uint64 in original Name: id.resp_h Description: The responder’s IP address. Data type: string - for the form 255.255.255.255 for IPv4 or [aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222] for IPv6 Name: id.resp_p Description: The responder’s port number. Data type: int64 - uint64 in original Name: proto Description: The transport layer protocol of the connection. Data type: string - enum(unknown_transport, tcp, udp, icmp). Only TCP and UDP in subset Name: service Description: An identification of an application protocol being sent over the connection. Data type: optional string Name: duration Description: How long the connection lasted. Data type: optional float64 - time interval Name: orig_bytes Description: The number of payload bytes the originator sent. Data type: optional int64 - uint64 in original Name: resp_bytes Description:The number of payload bytes the responder sent. Data type: optional int64 - uint64 in original Name: conn_state Description: Value indicating connection state. (S0, S1, SF, REJ, S2, S3, RSTO, RSTR, RSTOS0, RSTRH, SH, SHR, OTH) Data type: optional string Name: local_orig Description: If the connection is originated locally, this value will be T. If it was originated remotely it will be F. Data type: optional float64 - bool in original but null for all columns Name: local_resp Description: If the connection is responded to locally, this value will be T. If it was responded to remotely it will be F. Data type: optional float64 - bool in original but null for all columns Name: missed_bytes Description: Indicates the number of bytes missed in content gaps, which is representative of packet loss. Data type: optional int64 - uint64 in original. default = 0 Name: history Description: Records the state history of connections as a string of letters. Data type: optional string Name: orig_pkts Description: Number of packets that the originator sent. Data type: optional int64 - uint64 in original Name: orig_ip_bytes Description: Number of IP level bytes that the originator sent. Data type: optional int64 - uint64 in original Name: resp_pkts Description: Number of packets that the responder sent. Data type: optional int64 - uint64 in original Name: resp_ip_bytes Description: Number of IP level bytes that the responder sent. Data type: optional int64 - uint64 in original Name: label Description: Specifies if data point is benign or some form of malicious. See the dataset creators paper for descriptions of attack types Data type: string - enum('PartOfAHorizontalPortScan', 'Okiru', 'DDoS', 'C&C-HeartBeat', 'Benign', 'C&C-Torii', 'C&C', 'C&C-FileDownload', 'Okiru-Attack', 'Attack', 'FileDownload', 'C&C-HeartBeat-FileDownload', 'C&C-Mirai') NOTE: ts, uid, id.orig_h, id.resp_h SHOULD BE removed as they are dataset specific. Models should not be trained with specific timestamps or IP addresses (id.orig_h), as that can lead to over fitting to dataset specific times and addresses. Further local_orig, local_resp SHOULD BE removed as they are null in all rows, so they are useless for training. </details> ## Citation If you are using this dataset for your research, please reference it as “Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga. (2020). IoT-23: A labeled dataset with malicious and benign IoT network traffic (Version 1.0.0) [Data set]. Zenodo. http://doi.org/10.5281/zenodo.4743746” [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
19kmunz/iot-23-preprocessed-allcolumns
[ "task_categories:tabular-classification", "task_categories:table-question-answering", "language:en", "code", "region:us" ]
2023-10-27T15:57:09+00:00
{"language": ["en"], "task_categories": ["tabular-classification", "table-question-answering"], "dataset_info": {"features": [{"name": "ts", "dtype": "float64"}, {"name": "uid", "dtype": "string"}, {"name": "id.orig_h", "dtype": "string"}, {"name": "id.orig_p", "dtype": "int64"}, {"name": "id.resp_h", "dtype": "string"}, {"name": "id.resp_p", "dtype": "int64"}, {"name": "proto", "dtype": "string"}, {"name": "service", "dtype": "string"}, {"name": "duration", "dtype": "float64"}, {"name": "orig_bytes", "dtype": "int64"}, {"name": "resp_bytes", "dtype": "int64"}, {"name": "conn_state", "dtype": "string"}, {"name": "local_orig", "dtype": "float64"}, {"name": "local_resp", "dtype": "float64"}, {"name": "missed_bytes", "dtype": "int64"}, {"name": "history", "dtype": "string"}, {"name": "orig_pkts", "dtype": "int64"}, {"name": "orig_ip_bytes", "dtype": "int64"}, {"name": "resp_pkts", "dtype": "int64"}, {"name": "resp_ip_bytes", "dtype": "int64"}, {"name": "label", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1232978140, "num_examples": 6046623}], "download_size": 274218995, "dataset_size": 1232978140}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "tags": ["code"]}
2023-11-03T16:44:31+00:00
[]
[ "en" ]
TAGS #task_categories-tabular-classification #task_categories-table-question-answering #language-English #code #region-us
# Aposemat IoT-23 - a Labeled Dataset with Malcious and Benign Iot Network Traffic Homepage: URL This dataset contains a subset of the data from 20 captures of Malcious network traffic and 3 captures from live Benign Traffic on Internet of Things (IoT) devices. Created by Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga at the Avast AIC laboratory with the funding of Avast Software, this dataset is one of the best in the field for Intrusion Detection Systems (IDS) for IoT Devices (Comparative Analysis of IoT Botnet Datasets). The selection of the subset was determined by Aqeel Ahmed on Kaggle and contains 6 million samples. The Kaggle upload, nor this one, have employed data balancing. The Kaggle card does not contain methodology to understand what criteria was used to select these samples. If you want ensure best practice, use this dataset to mock-up processing the data into a model before using the full dataset with data balancing. This will require processing the 8GB of URL.labelled files. # Feature information: All features originate from the Zeek processing performed by the dataset creators. See notes here for caviats for each column. <details> <summary>Expand for feature names, descriptions, and datatypes</summary> Name: ts Desription: This is the time of the first packet. Data Type: float64 - Timestamp Name: uid Description: A Zeek-defined unique identifier of the connection. Data type: string Name: id.orig_h Description: The originator’s IP address. Data type: string - for the form 255.255.255.255 for IPv4 or [aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222] for IPv6 Name: id.orig_p Description: The originator’s port number. Data type: int64 - uint64 in original Name: id.resp_h Description: The responder’s IP address. Data type: string - for the form 255.255.255.255 for IPv4 or [aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222] for IPv6 Name: id.resp_p Description: The responder’s port number. Data type: int64 - uint64 in original Name: proto Description: The transport layer protocol of the connection. Data type: string - enum(unknown_transport, tcp, udp, icmp). Only TCP and UDP in subset Name: service Description: An identification of an application protocol being sent over the connection. Data type: optional string Name: duration Description: How long the connection lasted. Data type: optional float64 - time interval Name: orig_bytes Description: The number of payload bytes the originator sent. Data type: optional int64 - uint64 in original Name: resp_bytes Description:The number of payload bytes the responder sent. Data type: optional int64 - uint64 in original Name: conn_state Description: Value indicating connection state. (S0, S1, SF, REJ, S2, S3, RSTO, RSTR, RSTOS0, RSTRH, SH, SHR, OTH) Data type: optional string Name: local_orig Description: If the connection is originated locally, this value will be T. If it was originated remotely it will be F. Data type: optional float64 - bool in original but null for all columns Name: local_resp Description: If the connection is responded to locally, this value will be T. If it was responded to remotely it will be F. Data type: optional float64 - bool in original but null for all columns Name: missed_bytes Description: Indicates the number of bytes missed in content gaps, which is representative of packet loss. Data type: optional int64 - uint64 in original. default = 0 Name: history Description: Records the state history of connections as a string of letters. Data type: optional string Name: orig_pkts Description: Number of packets that the originator sent. Data type: optional int64 - uint64 in original Name: orig_ip_bytes Description: Number of IP level bytes that the originator sent. Data type: optional int64 - uint64 in original Name: resp_pkts Description: Number of packets that the responder sent. Data type: optional int64 - uint64 in original Name: resp_ip_bytes Description: Number of IP level bytes that the responder sent. Data type: optional int64 - uint64 in original Name: label Description: Specifies if data point is benign or some form of malicious. See the dataset creators paper for descriptions of attack types Data type: string - enum('PartOfAHorizontalPortScan', 'Okiru', 'DDoS', 'C&C-HeartBeat', 'Benign', 'C&C-Torii', 'C&C', 'C&C-FileDownload', 'Okiru-Attack', 'Attack', 'FileDownload', 'C&C-HeartBeat-FileDownload', 'C&C-Mirai') NOTE: ts, uid, id.orig_h, id.resp_h SHOULD BE removed as they are dataset specific. Models should not be trained with specific timestamps or IP addresses (id.orig_h), as that can lead to over fitting to dataset specific times and addresses. Further local_orig, local_resp SHOULD BE removed as they are null in all rows, so they are useless for training. </details> If you are using this dataset for your research, please reference it as “Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga. (2020). IoT-23: A labeled dataset with malicious and benign IoT network traffic (Version 1.0.0) [Data set]. Zenodo. URL” More Information needed
[ "# Aposemat IoT-23 - a Labeled Dataset with Malcious and Benign Iot Network Traffic \nHomepage: URL\n\nThis dataset contains a subset of the data from 20 captures of Malcious network traffic and 3 captures from live Benign Traffic on Internet of Things (IoT) devices. Created by Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga at the Avast AIC laboratory with the funding of Avast Software, this dataset is one of the best in the field for Intrusion Detection Systems (IDS) for IoT Devices (Comparative Analysis of IoT Botnet Datasets). \n\nThe selection of the subset was determined by Aqeel Ahmed on Kaggle and contains 6 million samples. The Kaggle upload, nor this one, have employed data balancing. The Kaggle card does not contain methodology to understand what criteria was used to select these samples. If you want ensure best practice, use this dataset to mock-up processing the data into a model before using the full dataset with data balancing. This will require processing the 8GB of URL.labelled files.", "# Feature information:\n\nAll features originate from the Zeek processing performed by the dataset creators. See notes here for caviats for each column. \n<details>\n <summary>Expand for feature names, descriptions, and datatypes</summary>\n\nName: ts \nDesription: This is the time of the first packet. \nData Type: float64 - Timestamp \n\nName: uid \nDescription: A Zeek-defined unique identifier of the connection. \nData type: string \n\nName: id.orig_h \nDescription: The originator’s IP address. \nData type: string - for the form 255.255.255.255 for IPv4 or [aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222] for IPv6 \n\nName: id.orig_p \nDescription: The originator’s port number. \nData type: int64 - uint64 in original \n\nName: id.resp_h \nDescription: The responder’s IP address. \nData type: string - for the form 255.255.255.255 for IPv4 or [aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222] for IPv6 \n\nName: id.resp_p \nDescription: The responder’s port number. \nData type: int64 - uint64 in original \n\nName: proto \nDescription: The transport layer protocol of the connection. \nData type: string - enum(unknown_transport, tcp, udp, icmp). Only TCP and UDP in subset \n\nName: service \nDescription: An identification of an application protocol being sent over the connection. \nData type: optional string \n\nName: duration \nDescription: How long the connection lasted. \nData type: optional float64 - time interval \n\nName: orig_bytes \nDescription: The number of payload bytes the originator sent. \nData type: optional int64 - uint64 in original \n\nName: resp_bytes \nDescription:The number of payload bytes the responder sent. \nData type: optional int64 - uint64 in original \n\nName: conn_state \nDescription: Value indicating connection state. (S0, S1, SF, REJ, S2, S3, RSTO, RSTR, RSTOS0, RSTRH, SH, SHR, OTH) \nData type: optional string \n\nName: local_orig \nDescription: If the connection is originated locally, this value will be T. If it was originated remotely it will be F.\nData type: optional float64 - bool in original but null for all columns\n\nName: local_resp\nDescription: If the connection is responded to locally, this value will be T. If it was responded to remotely it will be F.\nData type: optional float64 - bool in original but null for all columns\n\nName: missed_bytes \nDescription: Indicates the number of bytes missed in content gaps, which is representative of packet loss. \nData type: optional int64 - uint64 in original. default = 0\n\nName: history \nDescription: Records the state history of connections as a string of letters. \nData type: optional string \n\nName: orig_pkts \nDescription: Number of packets that the originator sent. \nData type: optional int64 - uint64 in original \n\nName: orig_ip_bytes \nDescription: Number of IP level bytes that the originator sent. \nData type: optional int64 - uint64 in original \n\nName: resp_pkts \nDescription: Number of packets that the responder sent. \nData type: optional int64 - uint64 in original \n\nName: resp_ip_bytes \nDescription: Number of IP level bytes that the responder sent. \nData type: optional int64 - uint64 in original \n\nName: label \nDescription: Specifies if data point is benign or some form of malicious. See the dataset creators paper for descriptions of attack types \nData type: string - enum('PartOfAHorizontalPortScan', 'Okiru', 'DDoS', 'C&C-HeartBeat',\n 'Benign', 'C&C-Torii', 'C&C', 'C&C-FileDownload', 'Okiru-Attack',\n 'Attack', 'FileDownload', 'C&C-HeartBeat-FileDownload',\n 'C&C-Mirai')\n\nNOTE: ts, uid, id.orig_h, id.resp_h SHOULD BE removed as they are dataset specific. Models should not be trained with specific timestamps or IP addresses (id.orig_h), as that can lead to over fitting to dataset specific times and addresses. \nFurther local_orig, local_resp SHOULD BE removed as they are null in all rows, so they are useless for training.\n</details>\n\nIf you are using this dataset for your research, please reference it as “Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga. (2020). IoT-23: A labeled dataset with malicious and benign IoT network traffic (Version 1.0.0) [Data set]. Zenodo. URL”\nMore Information needed" ]
[ "TAGS\n#task_categories-tabular-classification #task_categories-table-question-answering #language-English #code #region-us \n", "# Aposemat IoT-23 - a Labeled Dataset with Malcious and Benign Iot Network Traffic \nHomepage: URL\n\nThis dataset contains a subset of the data from 20 captures of Malcious network traffic and 3 captures from live Benign Traffic on Internet of Things (IoT) devices. Created by Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga at the Avast AIC laboratory with the funding of Avast Software, this dataset is one of the best in the field for Intrusion Detection Systems (IDS) for IoT Devices (Comparative Analysis of IoT Botnet Datasets). \n\nThe selection of the subset was determined by Aqeel Ahmed on Kaggle and contains 6 million samples. The Kaggle upload, nor this one, have employed data balancing. The Kaggle card does not contain methodology to understand what criteria was used to select these samples. If you want ensure best practice, use this dataset to mock-up processing the data into a model before using the full dataset with data balancing. This will require processing the 8GB of URL.labelled files.", "# Feature information:\n\nAll features originate from the Zeek processing performed by the dataset creators. See notes here for caviats for each column. \n<details>\n <summary>Expand for feature names, descriptions, and datatypes</summary>\n\nName: ts \nDesription: This is the time of the first packet. \nData Type: float64 - Timestamp \n\nName: uid \nDescription: A Zeek-defined unique identifier of the connection. \nData type: string \n\nName: id.orig_h \nDescription: The originator’s IP address. \nData type: string - for the form 255.255.255.255 for IPv4 or [aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222] for IPv6 \n\nName: id.orig_p \nDescription: The originator’s port number. \nData type: int64 - uint64 in original \n\nName: id.resp_h \nDescription: The responder’s IP address. \nData type: string - for the form 255.255.255.255 for IPv4 or [aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222] for IPv6 \n\nName: id.resp_p \nDescription: The responder’s port number. \nData type: int64 - uint64 in original \n\nName: proto \nDescription: The transport layer protocol of the connection. \nData type: string - enum(unknown_transport, tcp, udp, icmp). Only TCP and UDP in subset \n\nName: service \nDescription: An identification of an application protocol being sent over the connection. \nData type: optional string \n\nName: duration \nDescription: How long the connection lasted. \nData type: optional float64 - time interval \n\nName: orig_bytes \nDescription: The number of payload bytes the originator sent. \nData type: optional int64 - uint64 in original \n\nName: resp_bytes \nDescription:The number of payload bytes the responder sent. \nData type: optional int64 - uint64 in original \n\nName: conn_state \nDescription: Value indicating connection state. (S0, S1, SF, REJ, S2, S3, RSTO, RSTR, RSTOS0, RSTRH, SH, SHR, OTH) \nData type: optional string \n\nName: local_orig \nDescription: If the connection is originated locally, this value will be T. If it was originated remotely it will be F.\nData type: optional float64 - bool in original but null for all columns\n\nName: local_resp\nDescription: If the connection is responded to locally, this value will be T. If it was responded to remotely it will be F.\nData type: optional float64 - bool in original but null for all columns\n\nName: missed_bytes \nDescription: Indicates the number of bytes missed in content gaps, which is representative of packet loss. \nData type: optional int64 - uint64 in original. default = 0\n\nName: history \nDescription: Records the state history of connections as a string of letters. \nData type: optional string \n\nName: orig_pkts \nDescription: Number of packets that the originator sent. \nData type: optional int64 - uint64 in original \n\nName: orig_ip_bytes \nDescription: Number of IP level bytes that the originator sent. \nData type: optional int64 - uint64 in original \n\nName: resp_pkts \nDescription: Number of packets that the responder sent. \nData type: optional int64 - uint64 in original \n\nName: resp_ip_bytes \nDescription: Number of IP level bytes that the responder sent. \nData type: optional int64 - uint64 in original \n\nName: label \nDescription: Specifies if data point is benign or some form of malicious. See the dataset creators paper for descriptions of attack types \nData type: string - enum('PartOfAHorizontalPortScan', 'Okiru', 'DDoS', 'C&C-HeartBeat',\n 'Benign', 'C&C-Torii', 'C&C', 'C&C-FileDownload', 'Okiru-Attack',\n 'Attack', 'FileDownload', 'C&C-HeartBeat-FileDownload',\n 'C&C-Mirai')\n\nNOTE: ts, uid, id.orig_h, id.resp_h SHOULD BE removed as they are dataset specific. Models should not be trained with specific timestamps or IP addresses (id.orig_h), as that can lead to over fitting to dataset specific times and addresses. \nFurther local_orig, local_resp SHOULD BE removed as they are null in all rows, so they are useless for training.\n</details>\n\nIf you are using this dataset for your research, please reference it as “Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga. (2020). IoT-23: A labeled dataset with malicious and benign IoT network traffic (Version 1.0.0) [Data set]. Zenodo. URL”\nMore Information needed" ]
[ 38, 253, 1171 ]
[ "passage: TAGS\n#task_categories-tabular-classification #task_categories-table-question-answering #language-English #code #region-us \n# Aposemat IoT-23 - a Labeled Dataset with Malcious and Benign Iot Network Traffic \nHomepage: URL\n\nThis dataset contains a subset of the data from 20 captures of Malcious network traffic and 3 captures from live Benign Traffic on Internet of Things (IoT) devices. Created by Sebastian Garcia, Agustin Parmisano, & Maria Jose Erquiaga at the Avast AIC laboratory with the funding of Avast Software, this dataset is one of the best in the field for Intrusion Detection Systems (IDS) for IoT Devices (Comparative Analysis of IoT Botnet Datasets). \n\nThe selection of the subset was determined by Aqeel Ahmed on Kaggle and contains 6 million samples. The Kaggle upload, nor this one, have employed data balancing. The Kaggle card does not contain methodology to understand what criteria was used to select these samples. If you want ensure best practice, use this dataset to mock-up processing the data into a model before using the full dataset with data balancing. This will require processing the 8GB of URL.labelled files." ]
cceec979bc7fa4ec01e2ef6be667d8cf7a12a58f
# Dataset Card for "tts_female" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
linhtran92/tts_female
[ "region:us" ]
2023-10-27T16:12:14+00:00
{"dataset_info": {"features": [{"name": "sentence_norm", "dtype": "string"}, {"name": "audio", "struct": [{"name": "array", "sequence": "int64"}, {"name": "path", "dtype": "string"}, {"name": "sampling_rate", "dtype": "int64"}]}, {"name": "wer", "dtype": "int64"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 212990169, "num_examples": 498}], "download_size": 47949623, "dataset_size": 212990169}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-27T16:12:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for "tts_female" More Information needed
[ "# Dataset Card for \"tts_female\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"tts_female\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"tts_female\"\n\nMore Information needed" ]
9c827e242f262444b000156663024ddcd1d24200
# Dataset Card for "Dataset_semantic_alignment_translation_en-es-direction_en-pt_br-direction" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Weni/Dataset_semantic_alignment_translation_en-es-direction_en-pt_br-direction
[ "region:us" ]
2023-10-27T16:13:41+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "prompt", "dtype": "string"}, {"name": "string", "dtype": "string"}, {"name": "string_translation", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9521461, "num_examples": 40001}], "download_size": 3814409, "dataset_size": 9521461}}
2023-11-03T14:14:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Dataset_semantic_alignment_translation_en-es-direction_en-pt_br-direction" More Information needed
[ "# Dataset Card for \"Dataset_semantic_alignment_translation_en-es-direction_en-pt_br-direction\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Dataset_semantic_alignment_translation_en-es-direction_en-pt_br-direction\"\n\nMore Information needed" ]
[ 6, 37 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Dataset_semantic_alignment_translation_en-es-direction_en-pt_br-direction\"\n\nMore Information needed" ]
a8bf9f11cd05b1203443aea22e6e2ad900f54ad7
# Dataset Card for "tts_997" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
linhtran92/tts_997
[ "region:us" ]
2023-10-27T16:13:56+00:00
{"dataset_info": {"features": [{"name": "sentence_norm", "dtype": "string"}, {"name": "audio", "struct": [{"name": "array", "sequence": "int64"}, {"name": "path", "dtype": "string"}, {"name": "sampling_rate", "dtype": "int64"}]}, {"name": "wer", "dtype": "int64"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 435326923.0, "num_examples": 997}], "download_size": 93711170, "dataset_size": 435326923.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-27T16:14:02+00:00
[]
[]
TAGS #region-us
# Dataset Card for "tts_997" More Information needed
[ "# Dataset Card for \"tts_997\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"tts_997\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"tts_997\"\n\nMore Information needed" ]
0db3fd74a59edb6733cdc4a9cf968f881ce9ae26
# Dataset Card for "TestUpload3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
MaxReynolds/TestUpload3
[ "region:us" ]
2023-10-27T16:22:44+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1258070.0, "num_examples": 10}], "download_size": 1259602, "dataset_size": 1258070.0}}
2023-10-27T16:22:48+00:00
[]
[]
TAGS #region-us
# Dataset Card for "TestUpload3" More Information needed
[ "# Dataset Card for \"TestUpload3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"TestUpload3\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"TestUpload3\"\n\nMore Information needed" ]
2e0b2b9f94ffd978ba3e24496dafbc3611978f53
# Dataset Card for "private_prediction_1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
quocanh34/private_prediction_1
[ "region:us" ]
2023-10-27T16:23:52+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "id", "dtype": "string"}, {"name": "pred_str", "dtype": "string"}, {"name": "pred_str_norm", "dtype": "string"}, {"name": "intent", "dtype": "string"}, {"name": "entities", "list": [{"name": "filler", "dtype": "string"}, {"name": "type", "dtype": "string"}]}, {"name": "file", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 174533608.625, "num_examples": 1299}], "download_size": 164304934, "dataset_size": 174533608.625}}
2023-10-27T16:24:09+00:00
[]
[]
TAGS #region-us
# Dataset Card for "private_prediction_1" More Information needed
[ "# Dataset Card for \"private_prediction_1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"private_prediction_1\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"private_prediction_1\"\n\nMore Information needed" ]
28fd3a2dd76800622a53b08c44fc3521dd80e6e3
# Dataset Card for text-descriptives-metadata This dataset has been created with [Argilla](https://docs.argilla.io). As shown in the sections below, this dataset can be loaded into Argilla as explained in [Load with Argilla](#load-with-argilla), or used directly with the `datasets` library in [Load with `datasets`](#load-with-datasets). ## Dataset Description - **Homepage:** https://argilla.io - **Repository:** https://github.com/argilla-io/argilla - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This dataset contains: * A dataset configuration file conforming to the Argilla dataset format named `argilla.yaml`. This configuration file will be used to configure the dataset when using the `FeedbackDataset.from_huggingface` method in Argilla. * Dataset records in a format compatible with HuggingFace `datasets`. These records will be loaded automatically when using `FeedbackDataset.from_huggingface` and can be loaded independently using the `datasets` library via `load_dataset`. * The [annotation guidelines](#annotation-guidelines) that have been used for building and curating the dataset, if they've been defined in Argilla. ### Load with Argilla To load with Argilla, you'll just need to install Argilla as `pip install argilla --upgrade` and then use the following code: ```python import argilla as rg ds = rg.FeedbackDataset.from_huggingface("nataliaElv/text-descriptives-metadata") ``` ### Load with `datasets` To load this dataset with `datasets`, you'll just need to install `datasets` as `pip install datasets --upgrade` and then use the following code: ```python from datasets import load_dataset ds = load_dataset("nataliaElv/text-descriptives-metadata") ``` ### Supported Tasks and Leaderboards This dataset can contain [multiple fields, questions and responses](https://docs.argilla.io/en/latest/conceptual_guides/data_model.html#feedback-dataset) so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the [Dataset Structure section](#dataset-structure). There are no leaderboards associated with this dataset. ### Languages [More Information Needed] ## Dataset Structure ### Data in Argilla The dataset is created in Argilla with: **fields**, **questions**, **suggestions**, **metadata**, and **guidelines**. The **fields** are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. | Field Name | Title | Type | Required | Markdown | | ---------- | ----- | ---- | -------- | -------- | | prompt | Prompt | FieldTypes.text | True | True | | context | Context | FieldTypes.text | False | True | The **questions** are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label_selection, multi_label_selection, or ranking. | Question Name | Title | Type | Required | Description | Values/Labels | | ------------- | ----- | ---- | -------- | ----------- | ------------- | | response | Response | QuestionTypes.text | True | N/A | N/A | The **suggestions** are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending "-suggestion" and "-suggestion-metadata" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with "-suggestion" and the metadata is appended with "-suggestion-metadata". **✨ NEW** The **metadata** is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the `metadata_properties` defined in the dataset configuration file in `argilla.yaml`. The **guidelines**, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the [annotation guidelines](#annotation-guidelines) section. ### Data Instances An example of a dataset instance in Argilla looks as follows: ```json { "external_id": null, "fields": { "context": null, "prompt": "Can brain cells move? By movement I mean long distance migration (preferably within the brain only)." }, "metadata": { "entropy": 0.4352176404374839, "flesch_reading_ease": 82.39000000000001, "n_characters": 85, "passed_quality_check": "True" }, "responses": [], "suggestions": [ { "agent": null, "question_name": "response", "score": null, "type": null, "value": "The question is relatively broad and one should take into account that the brain not only consists of neurons, but also glial cells (supportive cells) and pre-mitotic neuronal stem cells. Furthermore, as critical fellow-scientists have indicated, developmental stage is very important, as the developing embryonic brain is very different from the adult brain.\nHowever, after sifting through various publications, the answer to the question is actually remarkably simple: Yes, brain cells migrate.\nIn the adult brain glial cells migrate in the brain (Kl\u00e4mbt, 2009). Glial cells are involved in a myriad of functions, but a notable example of migrating glial cells are the oligodendrocytes that migrate relative long distances to find their target axons onto which they wrap themselves to form the insulating myelin sheath (Tsai and Miller, 2002).\nNeuronal stem cells migrate over long distances in response to injury (Imitola et al., 2004) and they migrate from specific stem-cell locations (e.g., hippocampus and subventricular zone) to other regions (Clarke, 2003).\nPost-mitotic, but non-differentiated neurons have been shown to migrate in the adult brain in fish (Scott et al., 2012), and in mammals and non-human primates as well (Sawada et al., 2011).\nNot surprisingly, glial cells, stem cells and neurons also migrate during embryonic development. Most notably, post-mitotic neurons destined to fulfill peripheral functions have to migrate over relatively long distances from the neural crest to their target locations (Neuroscience, 2nd ed, Neuronal Migration)." } ] } ``` While the same record in HuggingFace `datasets` looks as follows: ```json { "context": null, "external_id": null, "metadata": "{\"n_characters\": 85, \"passed_quality_check\": \"True\", \"flesch_reading_ease\": 82.39000000000001, \"entropy\": 0.4352176404374839}", "prompt": "Can brain cells move? By movement I mean long distance migration (preferably within the brain only).", "response": [], "response-suggestion": "The question is relatively broad and one should take into account that the brain not only consists of neurons, but also glial cells (supportive cells) and pre-mitotic neuronal stem cells. Furthermore, as critical fellow-scientists have indicated, developmental stage is very important, as the developing embryonic brain is very different from the adult brain.\nHowever, after sifting through various publications, the answer to the question is actually remarkably simple: Yes, brain cells migrate.\nIn the adult brain glial cells migrate in the brain (Kl\u00e4mbt, 2009). Glial cells are involved in a myriad of functions, but a notable example of migrating glial cells are the oligodendrocytes that migrate relative long distances to find their target axons onto which they wrap themselves to form the insulating myelin sheath (Tsai and Miller, 2002).\nNeuronal stem cells migrate over long distances in response to injury (Imitola et al., 2004) and they migrate from specific stem-cell locations (e.g., hippocampus and subventricular zone) to other regions (Clarke, 2003).\nPost-mitotic, but non-differentiated neurons have been shown to migrate in the adult brain in fish (Scott et al., 2012), and in mammals and non-human primates as well (Sawada et al., 2011).\nNot surprisingly, glial cells, stem cells and neurons also migrate during embryonic development. Most notably, post-mitotic neurons destined to fulfill peripheral functions have to migrate over relatively long distances from the neural crest to their target locations (Neuroscience, 2nd ed, Neuronal Migration).", "response-suggestion-metadata": { "agent": null, "score": null, "type": null } } ``` ### Data Fields Among the dataset fields, we differentiate between the following: * **Fields:** These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. * **prompt** is of type `FieldTypes.text`. * (optional) **context** is of type `FieldTypes.text`. * **Questions:** These are the questions that will be asked to the annotators. They can be of different types, such as `RatingQuestion`, `TextQuestion`, `LabelQuestion`, `MultiLabelQuestion`, and `RankingQuestion`. * **response** is of type `QuestionTypes.text`. * **Suggestions:** As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable. * (optional) **response-suggestion** is of type `QuestionTypes.text`. Additionally, we also have two more fields that are optional and are the following: * **✨ NEW** **metadata:** This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the `metadata_properties` defined in the dataset configuration file in `argilla.yaml`. * **external_id:** This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file. ### Data Splits The dataset contains a single split, which is `train`. ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation guidelines This is a supervised fine-tuning dataset that contains instructions. Please write the response to the instruction in the response field. Take the context into account when writing the response. #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
nataliaElv/text-descriptives-metadata
[ "size_categories:1K<n<10K", "rlfh", "argilla", "human-feedback", "region:us" ]
2023-10-27T16:37:08+00:00
{"size_categories": "1K<n<10K", "tags": ["rlfh", "argilla", "human-feedback"]}
2023-10-27T16:37:10+00:00
[]
[]
TAGS #size_categories-1K<n<10K #rlfh #argilla #human-feedback #region-us
Dataset Card for text-descriptives-metadata =========================================== This dataset has been created with Argilla. As shown in the sections below, this dataset can be loaded into Argilla as explained in Load with Argilla, or used directly with the 'datasets' library in Load with 'datasets'. Dataset Description ------------------- * Homepage: URL * Repository: URL * Paper: * Leaderboard: * Point of Contact: ### Dataset Summary This dataset contains: * A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\_huggingface' method in Argilla. * Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\_huggingface' and can be loaded independently using the 'datasets' library via 'load\_dataset'. * The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla. ### Load with Argilla To load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code: ### Load with 'datasets' To load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code: ### Supported Tasks and Leaderboards This dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section. There are no leaderboards associated with this dataset. ### Languages Dataset Structure ----------------- ### Data in Argilla The dataset is created in Argilla with: fields, questions, suggestions, metadata, and guidelines. The fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. The questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\_selection, multi\_label\_selection, or ranking. The suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending "-suggestion" and "-suggestion-metadata" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with "-suggestion" and the metadata is appended with "-suggestion-metadata". NEW The metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\_properties' defined in the dataset configuration file in 'URL'. The guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section. ### Data Instances An example of a dataset instance in Argilla looks as follows: While the same record in HuggingFace 'datasets' looks as follows: ### Data Fields Among the dataset fields, we differentiate between the following: * Fields: These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. + prompt is of type 'URL'. + (optional) context is of type 'URL'. * Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'. + response is of type 'URL'. * Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable. + (optional) response-suggestion is of type 'URL'. Additionally, we also have two more fields that are optional and are the following: * NEW metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\_properties' defined in the dataset configuration file in 'URL'. * external\_id: This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file. ### Data Splits The dataset contains a single split, which is 'train'. Dataset Creation ---------------- ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation guidelines This is a supervised fine-tuning dataset that contains instructions. Please write the response to the instruction in the response field. Take the context into account when writing the response. #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information Considerations for Using the Data --------------------------------- ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations Additional Information ---------------------- ### Dataset Curators ### Licensing Information ### Contributions
[ "### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.", "### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:", "### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:", "### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.", "### Languages\n\n\nDataset Structure\n-----------------", "### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nNEW The metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section.", "### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:", "### Data Fields\n\n\nAmong the dataset fields, we differentiate between the following:\n\n\n* Fields: These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\t+ prompt is of type 'URL'.\n\t+ (optional) context is of type 'URL'.\n* Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'.\n\n\n\t+ response is of type 'URL'.\n* Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable.\n\n\n\t+ (optional) response-suggestion is of type 'URL'.\n\n\nAdditionally, we also have two more fields that are optional and are the following:\n\n\n* NEW metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n* external\\_id: This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file.", "### Data Splits\n\n\nThe dataset contains a single split, which is 'train'.\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation guidelines\n\n\nThis is a supervised fine-tuning dataset that contains instructions. Please write the response to the instruction in the response field. Take the context into account when writing the response.", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#size_categories-1K<n<10K #rlfh #argilla #human-feedback #region-us \n", "### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.", "### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:", "### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:", "### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.", "### Languages\n\n\nDataset Structure\n-----------------", "### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nNEW The metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section.", "### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:", "### Data Fields\n\n\nAmong the dataset fields, we differentiate between the following:\n\n\n* Fields: These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\t+ prompt is of type 'URL'.\n\t+ (optional) context is of type 'URL'.\n* Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'.\n\n\n\t+ response is of type 'URL'.\n* Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable.\n\n\n\t+ (optional) response-suggestion is of type 'URL'.\n\n\nAdditionally, we also have two more fields that are optional and are the following:\n\n\n* NEW metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n* external\\_id: This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file.", "### Data Splits\n\n\nThe dataset contains a single split, which is 'train'.\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation guidelines\n\n\nThis is a supervised fine-tuning dataset that contains instructions. Please write the response to the instruction in the response field. Take the context into account when writing the response.", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 29, 162, 40, 53, 68, 11, 402, 40, 452, 27, 7, 4, 10, 10, 5, 45, 5, 9, 18, 7, 8, 14, 6, 6, 5 ]
[ "passage: TAGS\n#size_categories-1K<n<10K #rlfh #argilla #human-feedback #region-us \n### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.### Languages\n\n\nDataset Structure\n-----------------", "passage: ### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nNEW The metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section.### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:### Data Fields\n\n\nAmong the dataset fields, we differentiate between the following:\n\n\n* Fields: These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\t+ prompt is of type 'URL'.\n\t+ (optional) context is of type 'URL'.\n* Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'.\n\n\n\t+ response is of type 'URL'.\n* Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable.\n\n\n\t+ (optional) response-suggestion is of type 'URL'.\n\n\nAdditionally, we also have two more fields that are optional and are the following:\n\n\n* NEW metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n* external\\_id: This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file." ]
797aa58b37a8a3c359f8347da486ceac8706941a
# Dataset Card for "babylm-10M" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
deven367/babylm-10M
[ "region:us" ]
2023-10-27T16:40:02+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "valid", "path": "data/valid-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 57630095, "num_examples": 1015521}, {"name": "valid", "num_bytes": 54930583, "num_examples": 986022}, {"name": "test", "num_bytes": 59992087, "num_examples": 1008854}], "download_size": 108516100, "dataset_size": 172552765}}
2023-10-27T16:40:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "babylm-10M" More Information needed
[ "# Dataset Card for \"babylm-10M\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"babylm-10M\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"babylm-10M\"\n\nMore Information needed" ]
e94e3e0656c2f57e99194f73f91b2d149ca60edd
# Dataset Card for "finetune_run2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sayan1101/finetune_run2
[ "region:us" ]
2023-10-27T16:51:28+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "text", "struct": [{"name": "text", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 1185515655, "num_examples": 2585615}], "download_size": 667868561, "dataset_size": 1185515655}}
2023-10-27T17:11:40+00:00
[]
[]
TAGS #region-us
# Dataset Card for "finetune_run2" More Information needed
[ "# Dataset Card for \"finetune_run2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"finetune_run2\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"finetune_run2\"\n\nMore Information needed" ]
0a05b8c2601dc6464543f5994d44e49bdbfc4dc3
# Dataset Card for "soict_train_dataset_v2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
thanhduycao/soict_train_dataset_v2
[ "region:us" ]
2023-10-27T17:01:19+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "audio", "struct": [{"name": "array", "sequence": "float64"}, {"name": "path", "dtype": "string"}, {"name": "sampling_rate", "dtype": "int64"}]}, {"name": "sentence_norm", "dtype": "string"}, {"name": "wer", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 4196405867, "num_examples": 8181}, {"name": "test", "num_bytes": 565495055, "num_examples": 1092}], "download_size": 1121417074, "dataset_size": 4761900922}}
2023-10-27T17:02:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "soict_train_dataset_v2" More Information needed
[ "# Dataset Card for \"soict_train_dataset_v2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"soict_train_dataset_v2\"\n\nMore Information needed" ]
[ 6, 22 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"soict_train_dataset_v2\"\n\nMore Information needed" ]
2f50f2daa7f8b8f8e69af33fd72b4394220b8e2b
# Dataset Card for "cancer_image_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
vigneshm1995/cancer_image_dataset
[ "region:us" ]
2023-10-27T17:12:28+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 3807590243.884, "num_examples": 54706}], "download_size": 2823547183, "dataset_size": 3807590243.884}}
2023-10-27T17:20:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cancer_image_dataset" More Information needed
[ "# Dataset Card for \"cancer_image_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cancer_image_dataset\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cancer_image_dataset\"\n\nMore Information needed" ]
7c558f1eb354824e60c1799770d7b8d1f68e7b70
# Dataset Card for "filtered_finetune_run2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sayan1101/filtered_finetune_run2
[ "region:us" ]
2023-10-27T17:21:59+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1195434678, "num_examples": 2585534}], "download_size": 668295236, "dataset_size": 1195434678}}
2023-10-27T18:47:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "filtered_finetune_run2" More Information needed
[ "# Dataset Card for \"filtered_finetune_run2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"filtered_finetune_run2\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"filtered_finetune_run2\"\n\nMore Information needed" ]
24c42ed79d004927b9d5498fff90efb0691ffda5
# Dataset Card for "pubmed-kinase-abstract" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
RF0000/pubmed-kinase-abstract
[ "language:en", "license:mit", "region:us" ]
2023-10-27T17:23:14+00:00
{"language": ["en"], "license": "mit", "dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "Title", "dtype": "string"}, {"name": "Abstract", "dtype": "string"}, {"name": "Journal", "dtype": "string"}, {"name": "Language", "dtype": "string"}, {"name": "Year", "dtype": "string"}, {"name": "Month", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 11898592, "num_examples": 9999}], "download_size": 6290504, "dataset_size": 11898592}}
2023-10-27T19:11:43+00:00
[]
[ "en" ]
TAGS #language-English #license-mit #region-us
# Dataset Card for "pubmed-kinase-abstract" More Information needed
[ "# Dataset Card for \"pubmed-kinase-abstract\"\n\nMore Information needed" ]
[ "TAGS\n#language-English #license-mit #region-us \n", "# Dataset Card for \"pubmed-kinase-abstract\"\n\nMore Information needed" ]
[ 15, 19 ]
[ "passage: TAGS\n#language-English #license-mit #region-us \n# Dataset Card for \"pubmed-kinase-abstract\"\n\nMore Information needed" ]
bdebdada7249fd9b68c972c46f9f65e0545c3d7f
**General description**: This dataset comprisses a set of tweets crawled during the COVID-19 pandemic (from March 2020 to June 2021). Tweets are located in two different regions: Spain and USA. This adds value to the collection, as it contains data in two languages. This data was used as part of a broader study that aimed to determine the evolution of different personality traits and disorders during the pandemic. Thus, weak labels for different dimensions, such as sentiment, personality prevalence, and others, are also available. Further details about this experimentation can be found in the [paper](https://link.springer.com/article/10.1007/s10844-023-00810-3) or [Github](https://github.com/MarcosFP97/COVID-19-Personality). **Data**: A sample of the data can be visualised and downloaded from this card. More specifically, it corresponds to the month of January 2021 and tweets are located on USA. Tweets were anonymized for privacy reasons. The whole dataset is available upon request to fullfil Twitter's restrictions. You can contact either with [email protected] or [email protected] to obtain it. **Citation**: For all the future studies using our data, we kindly ask to quote our paper: @article{fernandez2023personality, \ title={Personality trait analysis during the COVID-19 pandemic: a comparative study on social media}, \ author={Fern{\'a}ndez-Pichel, Marcos and Arag{\'o}n, Mario Ezra and Saborido-Pati{\~n}o, Juli{\'a}n and Losada, David E}, \ journal={Journal of Intelligent Information Systems}, \ pages={1--26}, \ year={2023}, \ publisher={Springer} \ }
citiusLTL/Twitter-COVID-19
[ "task_categories:text-classification", "language:es", "language:en", "license:gpl-3.0", "region:us" ]
2023-10-27T17:30:15+00:00
{"language": ["es", "en"], "license": "gpl-3.0", "task_categories": ["text-classification"]}
2023-10-27T17:35:38+00:00
[]
[ "es", "en" ]
TAGS #task_categories-text-classification #language-Spanish #language-English #license-gpl-3.0 #region-us
General description: This dataset comprisses a set of tweets crawled during the COVID-19 pandemic (from March 2020 to June 2021). Tweets are located in two different regions: Spain and USA. This adds value to the collection, as it contains data in two languages. This data was used as part of a broader study that aimed to determine the evolution of different personality traits and disorders during the pandemic. Thus, weak labels for different dimensions, such as sentiment, personality prevalence, and others, are also available. Further details about this experimentation can be found in the paper or Github. Data: A sample of the data can be visualised and downloaded from this card. More specifically, it corresponds to the month of January 2021 and tweets are located on USA. Tweets were anonymized for privacy reasons. The whole dataset is available upon request to fullfil Twitter's restrictions. You can contact either with URL@URL or URL@URL to obtain it. Citation: For all the future studies using our data, we kindly ask to quote our paper: @article{fernandez2023personality, \ title={Personality trait analysis during the COVID-19 pandemic: a comparative study on social media}, \ author={Fern{\'a}ndez-Pichel, Marcos and Arag{\'o}n, Mario Ezra and Saborido-Pati{\~n}o, Juli{\'a}n and Losada, David E}, \ journal={Journal of Intelligent Information Systems}, \ pages={1--26}, \ year={2023}, \ publisher={Springer} \ }
[]
[ "TAGS\n#task_categories-text-classification #language-Spanish #language-English #license-gpl-3.0 #region-us \n" ]
[ 34 ]
[ "passage: TAGS\n#task_categories-text-classification #language-Spanish #language-English #license-gpl-3.0 #region-us \n" ]
15010e7b02dc960744d75d63bfe541d8a0b2a394
# Dataset Card for "libriphrase_meta" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
charsiu/libriphrase_meta
[ "region:us" ]
2023-10-27T17:30:29+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "anchor", "dtype": "string"}, {"name": "anchor_spk", "dtype": "int64"}, {"name": "anchor_text", "dtype": "string"}, {"name": "anchor_dur", "dtype": "float64"}, {"name": "comparison", "dtype": "string"}, {"name": "comparison_spk", "dtype": "int64"}, {"name": "comparison_text", "dtype": "string"}, {"name": "comparison_dur", "dtype": "float64"}, {"name": "type", "dtype": "string"}, {"name": "target", "dtype": "int64"}, {"name": "class", "dtype": "int64"}, {"name": "anchor_phone", "dtype": "string"}, {"name": "comparison_phone", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 53970720, "num_examples": 203013}], "download_size": 8382220, "dataset_size": 53970720}}
2023-10-27T17:33:50+00:00
[]
[]
TAGS #region-us
# Dataset Card for "libriphrase_meta" More Information needed
[ "# Dataset Card for \"libriphrase_meta\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"libriphrase_meta\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"libriphrase_meta\"\n\nMore Information needed" ]
7ef3561c2950adf5f490887a5bad586700767e67
# Dataset Card for "chemnlp-chemdner" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kjappelbaum/chemnlp-chemdner
[ "region:us" ]
2023-10-27T17:39:06+00:00
{"dataset_info": {"features": [{"name": "entities", "sequence": "string"}, {"name": "text", "dtype": "string"}, {"name": "split", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 14376666, "num_examples": 19440}], "download_size": 8033115, "dataset_size": 14376666}}
2023-10-27T18:07:02+00:00
[]
[]
TAGS #region-us
# Dataset Card for "chemnlp-chemdner" More Information needed
[ "# Dataset Card for \"chemnlp-chemdner\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"chemnlp-chemdner\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"chemnlp-chemdner\"\n\nMore Information needed" ]
a32fed0deecbcdedc7371b9e4061ba20aba47fbd
# Dataset Card for "sentence_augmented" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
anlp/sentence_augmented
[ "region:us" ]
2023-10-27T17:41:45+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "sentences", "sequence": "string"}, {"name": "new_gt", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 1189532, "num_examples": 251}], "download_size": 237012, "dataset_size": 1189532}}
2023-10-27T17:41:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sentence_augmented" More Information needed
[ "# Dataset Card for \"sentence_augmented\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sentence_augmented\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"sentence_augmented\"\n\nMore Information needed" ]
f96a1ccc2a0aa6d017359571b0a2b7fc4562fdbb
# Dataset Card for "wiki40b-da" ## Dataset Description - **Point of Contact:** [Dan Saattrup Nielsen](mailto:[email protected]) - **Size of downloaded dataset files:** 150.57 MB - **Size of the generated dataset:** 246.09 MB - **Total amount of disk used:** 396.66 MB ### Dataset Summary This dataset is an upload of the Danish part of the [Wiki40b dataset](https://aclanthology.org/2020.lrec-1.297), being a cleaned version of a dump of Wikipedia. The dataset is identical in content to [this dataset on the Hugging Face Hub](https://huggingface.co/datasets/wiki40b), but that one requires both `apache_beam`, `tensorflow` and `mwparserfromhell`, which can lead to dependency issues since these are not compatible with several newer packages. The training, validation and test splits are the original ones. ### Languages The dataset is available in Danish (`da`). ## Dataset Structure ### Data Instances - **Size of downloaded dataset files:** 150.57 MB - **Size of the generated dataset:** 246.09 MB - **Total amount of disk used:** 396.66 MB An example from the dataset looks as follows. ``` { 'wikidata_id': 'Q17341862', 'text': "\n_START_ARTICLE_\nÆgyptiske tekstiler\n_START_PARAGRAPH_\nTekstiler havde mange (...)", 'version_id': '9018011197452276273' } ``` ### Data Fields The data fields are the same among all splits. - `wikidata_id`: a `string` feature. - `text`: a `string` feature. - `version_id`: a `string` feature. ### Dataset Statistics There are 109,486 samples in the training split, 6,173 samples in the validation split and 6,219 in the test split. #### Document Length Distribution ![image/png](https://cdn-uploads.huggingface.co/production/uploads/60d368a613f774189902f555/dn-7_ugJObyF-CkD6XoO-.png) ## Additional Information ### Dataset Curators [Dan Saattrup Nielsen](https://saattrupdan.github.io/) from the [The Alexandra Institute](https://alexandra.dk/) uploaded it to the Hugging Face Hub. ### Licensing Information The dataset is licensed under the [CC-BY-SA license](https://creativecommons.org/licenses/by-sa/4.0/).
alexandrainst/wiki40b-da
[ "task_categories:text-generation", "size_categories:100K<n<1M", "language:da", "license:cc-by-sa-4.0", "region:us" ]
2023-10-27T17:47:11+00:00
{"language": ["da"], "license": "cc-by-sa-4.0", "size_categories": ["100K<n<1M"], "task_categories": ["text-generation"], "pretty_name": "Wiki40b-da", "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "wikidata_id", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "version_id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 220855898, "num_examples": 109486}, {"name": "validation", "num_bytes": 12416304, "num_examples": 6173}, {"name": "test", "num_bytes": 12818380, "num_examples": 6219}], "download_size": 150569852, "dataset_size": 246090582}}
2023-10-27T18:08:09+00:00
[]
[ "da" ]
TAGS #task_categories-text-generation #size_categories-100K<n<1M #language-Danish #license-cc-by-sa-4.0 #region-us
# Dataset Card for "wiki40b-da" ## Dataset Description - Point of Contact: Dan Saattrup Nielsen - Size of downloaded dataset files: 150.57 MB - Size of the generated dataset: 246.09 MB - Total amount of disk used: 396.66 MB ### Dataset Summary This dataset is an upload of the Danish part of the Wiki40b dataset, being a cleaned version of a dump of Wikipedia. The dataset is identical in content to this dataset on the Hugging Face Hub, but that one requires both 'apache_beam', 'tensorflow' and 'mwparserfromhell', which can lead to dependency issues since these are not compatible with several newer packages. The training, validation and test splits are the original ones. ### Languages The dataset is available in Danish ('da'). ## Dataset Structure ### Data Instances - Size of downloaded dataset files: 150.57 MB - Size of the generated dataset: 246.09 MB - Total amount of disk used: 396.66 MB An example from the dataset looks as follows. ### Data Fields The data fields are the same among all splits. - 'wikidata_id': a 'string' feature. - 'text': a 'string' feature. - 'version_id': a 'string' feature. ### Dataset Statistics There are 109,486 samples in the training split, 6,173 samples in the validation split and 6,219 in the test split. #### Document Length Distribution !image/png ## Additional Information ### Dataset Curators Dan Saattrup Nielsen from the The Alexandra Institute uploaded it to the Hugging Face Hub. ### Licensing Information The dataset is licensed under the CC-BY-SA license.
[ "# Dataset Card for \"wiki40b-da\"", "## Dataset Description\n\n- Point of Contact: Dan Saattrup Nielsen\n- Size of downloaded dataset files: 150.57 MB\n- Size of the generated dataset: 246.09 MB\n- Total amount of disk used: 396.66 MB", "### Dataset Summary\n\nThis dataset is an upload of the Danish part of the Wiki40b dataset, being a cleaned version of a dump of Wikipedia.\n\nThe dataset is identical in content to this dataset on the Hugging Face Hub, but that one requires both 'apache_beam', 'tensorflow' and 'mwparserfromhell', which can lead to dependency issues since these are not compatible with several newer packages.\n\nThe training, validation and test splits are the original ones.", "### Languages\n\nThe dataset is available in Danish ('da').", "## Dataset Structure", "### Data Instances\n\n- Size of downloaded dataset files: 150.57 MB\n- Size of the generated dataset: 246.09 MB\n- Total amount of disk used: 396.66 MB\n\nAn example from the dataset looks as follows.", "### Data Fields\n\nThe data fields are the same among all splits.\n\n- 'wikidata_id': a 'string' feature.\n- 'text': a 'string' feature.\n- 'version_id': a 'string' feature.", "### Dataset Statistics\n\nThere are 109,486 samples in the training split, 6,173 samples in the validation split and 6,219 in the test split.", "#### Document Length Distribution\n\n!image/png", "## Additional Information", "### Dataset Curators\n\nDan Saattrup Nielsen from the The Alexandra\nInstitute uploaded it to the Hugging Face Hub.", "### Licensing Information\n\nThe dataset is licensed under the CC-BY-SA\nlicense." ]
[ "TAGS\n#task_categories-text-generation #size_categories-100K<n<1M #language-Danish #license-cc-by-sa-4.0 #region-us \n", "# Dataset Card for \"wiki40b-da\"", "## Dataset Description\n\n- Point of Contact: Dan Saattrup Nielsen\n- Size of downloaded dataset files: 150.57 MB\n- Size of the generated dataset: 246.09 MB\n- Total amount of disk used: 396.66 MB", "### Dataset Summary\n\nThis dataset is an upload of the Danish part of the Wiki40b dataset, being a cleaned version of a dump of Wikipedia.\n\nThe dataset is identical in content to this dataset on the Hugging Face Hub, but that one requires both 'apache_beam', 'tensorflow' and 'mwparserfromhell', which can lead to dependency issues since these are not compatible with several newer packages.\n\nThe training, validation and test splits are the original ones.", "### Languages\n\nThe dataset is available in Danish ('da').", "## Dataset Structure", "### Data Instances\n\n- Size of downloaded dataset files: 150.57 MB\n- Size of the generated dataset: 246.09 MB\n- Total amount of disk used: 396.66 MB\n\nAn example from the dataset looks as follows.", "### Data Fields\n\nThe data fields are the same among all splits.\n\n- 'wikidata_id': a 'string' feature.\n- 'text': a 'string' feature.\n- 'version_id': a 'string' feature.", "### Dataset Statistics\n\nThere are 109,486 samples in the training split, 6,173 samples in the validation split and 6,219 in the test split.", "#### Document Length Distribution\n\n!image/png", "## Additional Information", "### Dataset Curators\n\nDan Saattrup Nielsen from the The Alexandra\nInstitute uploaded it to the Hugging Face Hub.", "### Licensing Information\n\nThe dataset is licensed under the CC-BY-SA\nlicense." ]
[ 45, 12, 52, 116, 16, 6, 55, 55, 38, 11, 5, 26, 21 ]
[ "passage: TAGS\n#task_categories-text-generation #size_categories-100K<n<1M #language-Danish #license-cc-by-sa-4.0 #region-us \n# Dataset Card for \"wiki40b-da\"## Dataset Description\n\n- Point of Contact: Dan Saattrup Nielsen\n- Size of downloaded dataset files: 150.57 MB\n- Size of the generated dataset: 246.09 MB\n- Total amount of disk used: 396.66 MB### Dataset Summary\n\nThis dataset is an upload of the Danish part of the Wiki40b dataset, being a cleaned version of a dump of Wikipedia.\n\nThe dataset is identical in content to this dataset on the Hugging Face Hub, but that one requires both 'apache_beam', 'tensorflow' and 'mwparserfromhell', which can lead to dependency issues since these are not compatible with several newer packages.\n\nThe training, validation and test splits are the original ones.### Languages\n\nThe dataset is available in Danish ('da').## Dataset Structure### Data Instances\n\n- Size of downloaded dataset files: 150.57 MB\n- Size of the generated dataset: 246.09 MB\n- Total amount of disk used: 396.66 MB\n\nAn example from the dataset looks as follows.### Data Fields\n\nThe data fields are the same among all splits.\n\n- 'wikidata_id': a 'string' feature.\n- 'text': a 'string' feature.\n- 'version_id': a 'string' feature.### Dataset Statistics\n\nThere are 109,486 samples in the training split, 6,173 samples in the validation split and 6,219 in the test split.#### Document Length Distribution\n\n!image/png## Additional Information### Dataset Curators\n\nDan Saattrup Nielsen from the The Alexandra\nInstitute uploaded it to the Hugging Face Hub.### Licensing Information\n\nThe dataset is licensed under the CC-BY-SA\nlicense." ]
eba9a79320dfecb237a0d52ff85063faf65b12d4
# Dataset Card for "lex_glue" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
akkasi/lex_glue
[ "region:us" ]
2023-10-27T18:02:08+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "labels", "sequence": "float64"}], "splits": [{"name": "train", "num_bytes": 347116027, "num_examples": 44000}, {"name": "test", "num_bytes": 85707422, "num_examples": 11000}], "download_size": 166893046, "dataset_size": 432823449}}
2023-10-27T18:02:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for "lex_glue" More Information needed
[ "# Dataset Card for \"lex_glue\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"lex_glue\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"lex_glue\"\n\nMore Information needed" ]
479ccdbdbfbff18e2ed62eb629fbb565f2b0b5d3
# Dataset Card for "java_repo_star" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
api-misuse/java_repo_star
[ "region:us" ]
2023-10-27T18:20:41+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "repo_name", "dtype": "string"}, {"name": "stars_count", "dtype": "int64"}, {"name": "repo_head_hexsha", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 767389.0, "num_examples": 9641}], "download_size": 652097, "dataset_size": 767389.0}}
2023-10-27T18:27:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for "java_repo_star" More Information needed
[ "# Dataset Card for \"java_repo_star\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"java_repo_star\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"java_repo_star\"\n\nMore Information needed" ]
d726f0031374054b4b10f2467bd4406d2e0d45b6
# Dataset Card for "palabrero-guc-draft" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
orkidea/palabrero-guc-draft
[ "region:us" ]
2023-10-27T20:19:21+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "transcription", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 62556423.0, "num_examples": 17}], "download_size": 60689485, "dataset_size": 62556423.0}}
2023-10-28T17:57:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "palabrero-guc-draft" More Information needed
[ "# Dataset Card for \"palabrero-guc-draft\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"palabrero-guc-draft\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"palabrero-guc-draft\"\n\nMore Information needed" ]
03c2bd4d541a08fd47fee79efab7450014b76e99
## Dataset Description - **Repository:** [openai/gpt2](https://github.com/openai/gpt-2) - **Paper:** Radford et al. [Language Models are Unsupervised Multitask Learners](https://d4mucfpksywv.cloudfront.net/better-language-models/language-models.pdf) ### Dataset Summary This is the LAMBADA test split modified for bidirectional language models (for example BERT). The original is appended by punctuation symbols (for example `."`), as predicted by GPT-2 (small). The original is the LAMBADA test split [as pre-processed by OpenAI](https://huggingface.co/datasets/EleutherAI/lambada_openai), LAMBADA is used to evaluate the capabilities of computational models for text understanding by means of a word prediction task. LAMBADA is a collection of narrative texts sharing the characteristic that human subjects are able to guess their last word if they are exposed to the whole text, but not if they only see the last sentence preceding the target word. To succeed on LAMBADA, computational models cannot simply rely on local context, but must be able to keep track of information in the broader discourse. ### Languages English ### Source Data [EleutherAI/lambada_openai](https://huggingface.co/datasets/EleutherAI/lambada_openai) ### Licensing License: [Modified MIT](https://github.com/openai/gpt-2/blob/master/LICENSE) ### Citation ```bibtex @article{radford2019language, title={Language Models are Unsupervised Multitask Learners}, author={Radford, Alec and Wu, Jeff and Child, Rewon and Luan, David and Amodei, Dario and Sutskever, Ilya}, year={2019} } ``` ```bibtex @misc{ author={Paperno, Denis and Kruszewski, Germán and Lazaridou, Angeliki and Pham, Quan Ngoc and Bernardi, Raffaella and Pezzelle, Sandro and Baroni, Marco and Boleda, Gemma and Fernández, Raquel}, title={The LAMBADA dataset}, DOI={10.5281/zenodo.2630551}, publisher={Zenodo}, year={2016}, month={Aug} } ```
ltg/lambada-context
[ "task_categories:text-generation", "size_categories:1K<n<10K", "source_datasets:https://huggingface.co/datasets/EleutherAI/lambada_openai", "language:en", "license:mit", "region:us" ]
2023-10-27T20:20:16+00:00
{"language": ["en"], "license": "mit", "size_categories": ["1K<n<10K"], "source_datasets": ["https://huggingface.co/datasets/EleutherAI/lambada_openai"], "task_categories": ["text-generation"], "pretty_name": "LAMBADA"}
2023-10-30T10:53:07+00:00
[]
[ "en" ]
TAGS #task_categories-text-generation #size_categories-1K<n<10K #source_datasets-https-//huggingface.co/datasets/EleutherAI/lambada_openai #language-English #license-mit #region-us
## Dataset Description - Repository: openai/gpt2 - Paper: Radford et al. Language Models are Unsupervised Multitask Learners ### Dataset Summary This is the LAMBADA test split modified for bidirectional language models (for example BERT). The original is appended by punctuation symbols (for example '."'), as predicted by GPT-2 (small). The original is the LAMBADA test split as pre-processed by OpenAI, LAMBADA is used to evaluate the capabilities of computational models for text understanding by means of a word prediction task. LAMBADA is a collection of narrative texts sharing the characteristic that human subjects are able to guess their last word if they are exposed to the whole text, but not if they only see the last sentence preceding the target word. To succeed on LAMBADA, computational models cannot simply rely on local context, but must be able to keep track of information in the broader discourse. ### Languages English ### Source Data EleutherAI/lambada_openai ### Licensing License: Modified MIT
[ "## Dataset Description\n\n- Repository: openai/gpt2\n- Paper: Radford et al. Language Models are Unsupervised Multitask Learners", "### Dataset Summary\n\nThis is the LAMBADA test split modified for bidirectional language models (for example BERT). The original is appended by punctuation symbols (for example '.\"'), as predicted by GPT-2 (small). The original is the LAMBADA test split as pre-processed by OpenAI,\n\nLAMBADA is used to evaluate the capabilities of computational models for text understanding by means of a word prediction task. LAMBADA is a collection of narrative texts sharing the characteristic that human subjects are able to guess their last word if they are exposed to the whole text, but not if they only see the last sentence preceding the target word. To succeed on LAMBADA, computational models cannot simply rely on local context, but must be able to keep track of information in the broader discourse.", "### Languages\n\nEnglish", "### Source Data\n\nEleutherAI/lambada_openai", "### Licensing\n\nLicense: Modified MIT" ]
[ "TAGS\n#task_categories-text-generation #size_categories-1K<n<10K #source_datasets-https-//huggingface.co/datasets/EleutherAI/lambada_openai #language-English #license-mit #region-us \n", "## Dataset Description\n\n- Repository: openai/gpt2\n- Paper: Radford et al. Language Models are Unsupervised Multitask Learners", "### Dataset Summary\n\nThis is the LAMBADA test split modified for bidirectional language models (for example BERT). The original is appended by punctuation symbols (for example '.\"'), as predicted by GPT-2 (small). The original is the LAMBADA test split as pre-processed by OpenAI,\n\nLAMBADA is used to evaluate the capabilities of computational models for text understanding by means of a word prediction task. LAMBADA is a collection of narrative texts sharing the characteristic that human subjects are able to guess their last word if they are exposed to the whole text, but not if they only see the last sentence preceding the target word. To succeed on LAMBADA, computational models cannot simply rely on local context, but must be able to keep track of information in the broader discourse.", "### Languages\n\nEnglish", "### Source Data\n\nEleutherAI/lambada_openai", "### Licensing\n\nLicense: Modified MIT" ]
[ 68, 36, 187, 5, 14, 10 ]
[ "passage: TAGS\n#task_categories-text-generation #size_categories-1K<n<10K #source_datasets-https-//huggingface.co/datasets/EleutherAI/lambada_openai #language-English #license-mit #region-us \n## Dataset Description\n\n- Repository: openai/gpt2\n- Paper: Radford et al. Language Models are Unsupervised Multitask Learners### Dataset Summary\n\nThis is the LAMBADA test split modified for bidirectional language models (for example BERT). The original is appended by punctuation symbols (for example '.\"'), as predicted by GPT-2 (small). The original is the LAMBADA test split as pre-processed by OpenAI,\n\nLAMBADA is used to evaluate the capabilities of computational models for text understanding by means of a word prediction task. LAMBADA is a collection of narrative texts sharing the characteristic that human subjects are able to guess their last word if they are exposed to the whole text, but not if they only see the last sentence preceding the target word. To succeed on LAMBADA, computational models cannot simply rely on local context, but must be able to keep track of information in the broader discourse.### Languages\n\nEnglish### Source Data\n\nEleutherAI/lambada_openai### Licensing\n\nLicense: Modified MIT" ]
4b19953af454ed336c9a37912a1d8c870577ee3b
# Dataset Card for "multi_eurlex_en" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
akkasi/multi_eurlex_en
[ "region:us" ]
2023-10-27T20:36:29+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "labels", "sequence": "float64"}, {"name": "label2idx", "dtype": "string"}, {"name": "idx2label", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 427978199, "num_examples": 55000}, {"name": "test", "num_bytes": 62473199, "num_examples": 5000}, {"name": "validation", "num_bytes": 45019649, "num_examples": 5000}], "download_size": 206742924, "dataset_size": 535471047}}
2023-10-28T17:43:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "multi_eurlex_en" More Information needed
[ "# Dataset Card for \"multi_eurlex_en\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"multi_eurlex_en\"\n\nMore Information needed" ]
[ 6, 16 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"multi_eurlex_en\"\n\nMore Information needed" ]
f6025b20a80c6ea9e5b7b25c75fe8cfb7e8d5407
# Dataset Card for "test_1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ayoub999/test_1
[ "region:us" ]
2023-10-27T20:38:49+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "image", "dtype": "image"}, {"name": "bboxes", "sequence": {"sequence": "int64"}}, {"name": "ner_tags", "sequence": {"class_label": {"names": {"0": "O", "1": "Ref", "2": "NumFa", "3": "Fourniss", "4": "DateFa", "5": "DateLim", "6": "TotalHT", "7": "TVA", "8": "TotalTTc", "9": "unitP", "10": "Qt", "11": "TVAP", "12": "descp"}}}}, {"name": "tokens", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 470848.6666666667, "num_examples": 2}, {"name": "test", "num_bytes": 184985.0, "num_examples": 1}], "download_size": 678107, "dataset_size": 655833.6666666667}}
2023-10-28T16:07:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "test_1" More Information needed
[ "# Dataset Card for \"test_1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"test_1\"\n\nMore Information needed" ]
[ 6, 12 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"test_1\"\n\nMore Information needed" ]
f8d3ce1a938ed6e8aae2577ff20d6d3eab564a1e
Original, raw data can be found in Gene Expression Omnibus (GEO) https://www.ncbi.nlm.nih.gov/geo/
mwinn99/GPL7202
[ "size_categories:10K<n<100K", "license:odbl", "biology", "region:us" ]
2023-10-27T22:33:58+00:00
{"license": "odbl", "size_categories": ["10K<n<100K"], "tags": ["biology"]}
2024-01-05T21:25:25+00:00
[]
[]
TAGS #size_categories-10K<n<100K #license-odbl #biology #region-us
Original, raw data can be found in Gene Expression Omnibus (GEO) URL
[]
[ "TAGS\n#size_categories-10K<n<100K #license-odbl #biology #region-us \n" ]
[ 27 ]
[ "passage: TAGS\n#size_categories-10K<n<100K #license-odbl #biology #region-us \n" ]
fff284d5a3dff98d14606f9c717c055361e0054f
Original, raw data can be found in Gene Expression Omnibus (GEO) https://www.ncbi.nlm.nih.gov/geo/
mwinn99/GPL6885
[ "size_categories:10K<n<100K", "license:odbl", "biology", "region:us" ]
2023-10-27T22:37:31+00:00
{"license": "odbl", "size_categories": ["10K<n<100K"], "tags": ["biology"]}
2024-01-05T21:21:43+00:00
[]
[]
TAGS #size_categories-10K<n<100K #license-odbl #biology #region-us
Original, raw data can be found in Gene Expression Omnibus (GEO) URL
[]
[ "TAGS\n#size_categories-10K<n<100K #license-odbl #biology #region-us \n" ]
[ 27 ]
[ "passage: TAGS\n#size_categories-10K<n<100K #license-odbl #biology #region-us \n" ]
4026796a269b914c51fef3c83af21047abf4f844
Original, raw data can be found in Gene Expression Omnibus (GEO) https://www.ncbi.nlm.nih.gov/geo/
mwinn99/GPL6887
[ "size_categories:10K<n<100K", "license:odbl", "biology", "region:us" ]
2023-10-27T22:40:06+00:00
{"license": "odbl", "size_categories": ["10K<n<100K"], "tags": ["biology"]}
2024-01-05T21:23:01+00:00
[]
[]
TAGS #size_categories-10K<n<100K #license-odbl #biology #region-us
Original, raw data can be found in Gene Expression Omnibus (GEO) URL
[]
[ "TAGS\n#size_categories-10K<n<100K #license-odbl #biology #region-us \n" ]
[ 27 ]
[ "passage: TAGS\n#size_categories-10K<n<100K #license-odbl #biology #region-us \n" ]
da232ffb693e2f589538a7b961a95c7a92058dc9
Original, raw data can be found in Gene Expression Omnibus (GEO) https://www.ncbi.nlm.nih.gov/geo/
mwinn99/GPL96
[ "size_categories:10K<n<100K", "license:odbl", "biology", "region:us" ]
2023-10-27T22:43:33+00:00
{"license": "odbl", "size_categories": ["10K<n<100K"], "tags": ["biology"]}
2024-01-05T21:26:40+00:00
[]
[]
TAGS #size_categories-10K<n<100K #license-odbl #biology #region-us
Original, raw data can be found in Gene Expression Omnibus (GEO) URL
[]
[ "TAGS\n#size_categories-10K<n<100K #license-odbl #biology #region-us \n" ]
[ 27 ]
[ "passage: TAGS\n#size_categories-10K<n<100K #license-odbl #biology #region-us \n" ]
8968bd019e4cdcd2dc606d90ed139e2c59543542
Original, raw data can be found in Gene Expression Omnibus (GEO) https://www.ncbi.nlm.nih.gov/geo/
mwinn99/GPL6947
[ "size_categories:10K<n<100K", "license:odbl", "biology", "region:us" ]
2023-10-27T22:46:55+00:00
{"license": "odbl", "size_categories": ["10K<n<100K"], "tags": ["biology"]}
2024-01-05T21:22:13+00:00
[]
[]
TAGS #size_categories-10K<n<100K #license-odbl #biology #region-us
Original, raw data can be found in Gene Expression Omnibus (GEO) URL
[]
[ "TAGS\n#size_categories-10K<n<100K #license-odbl #biology #region-us \n" ]
[ 27 ]
[ "passage: TAGS\n#size_categories-10K<n<100K #license-odbl #biology #region-us \n" ]
2675dc1d579d956f47ec647c30576ab8b35eae05
Original, raw data can be found in Gene Expression Omnibus (GEO) https://www.ncbi.nlm.nih.gov/geo/
mwinn99/GPL1261
[ "size_categories:10K<n<100K", "license:odbl", "biology", "region:us" ]
2023-10-27T22:52:20+00:00
{"license": "odbl", "size_categories": ["10K<n<100K"], "tags": ["biology"]}
2024-01-05T21:25:57+00:00
[]
[]
TAGS #size_categories-10K<n<100K #license-odbl #biology #region-us
Original, raw data can be found in Gene Expression Omnibus (GEO) URL
[]
[ "TAGS\n#size_categories-10K<n<100K #license-odbl #biology #region-us \n" ]
[ 27 ]
[ "passage: TAGS\n#size_categories-10K<n<100K #license-odbl #biology #region-us \n" ]
209ea8229026ec800feddb02ad975a4948386712
# Dataset Card for WildChat ## Dataset Description - **Paper:** https://openreview.net/forum?id=Bl8u7ZRlbM - **License:** https://allenai.org/licenses/impact-lr - **Language(s) (NLP):** multi-lingual - **Point of Contact:** [Yuntian Deng](mailto:[email protected]) ### Dataset Summary WildChat is a collection of 650K conversations between human users and ChatGPT. We collected WildChat by offering online users free access to OpenAI's GPT-3.5 and GPT-4. The dataset contains a broad spectrum of user-chatbot interactions that are not previously covered by other instruction fine-tuning datasets: for example, interactions include ambiguous user requests, code-switching, topic-switching, political discussions, etc. WildChat can serve both as a dataset for instructional fine-tuning and as a valuable resource for studying user behaviors. Note that this dataset contains toxic user inputs/ChatGPT responses. A nontoxic subset of this dataest can be found [here](https://huggingface.co/datasets/allenai/WildChat-nontoxic). WildChat has been openly released under AI2's ImpACT license as a low-risk artifact. The use of WildChat to cause harm is strictly prohibited. ### Languages 66 languages were detected in WildChat. ### Personal and Sensitive Information The data has been de-identified with Microsoft Presidio and hand-written rules by the authors. ### Data Fields - `conversation_id` (string): Each conversation has a unique id. - `model` (string): The underlying OpenAI model, such as gpt-3.5-turbo or gpt-4. - `timestamp` (timestamp): The timestamp of the last turn in the conversation in UTC. - `conversation` (list): A list of user/assistant utterances. Each utterance is a dictionary containing the `role` of the speaker (user or assistant), the `content` of the utterance, the detected `language` of the utterance, whether the content of the utterance is considered `toxic`, and whether PII has been detected and anonymized (`redacted`). - `turn` (int): The number of turns in the conversation. A turn refers to one round of user-assistant interaction. - `language` (string): The language of the conversation. Note that this is the most frequently detected language in the utterances of the conversation. - `openai_moderation` (list): A list of OpenAI Moderation results. Each element in the list corresponds to one utterance in the conversation. - `detoxify_moderation` (list): A list of Detoxify results. Each element in the list corresponds to one utterance in the conversation. - `toxic` (bool): Whether this conversation contains any utterances considered to be toxic by either OpenAI Moderation or Detoxify. - `redacted` (bool): Whether this conversation contains any utterances in which PII is detected and anonymized. ### Empty User Inputs This dataset includes a small subset of conversations where users submitted empty inputs, sometimes leading to hallucinated responses from the assistant. This issue, first noticed by @yuchenlin, arises from the design of our Huggingface chatbot used for data collection, which did not restrict the submission of empty inputs. As a result, users could submit without entering any text, causing the assistant to generate responses without any user prompts. This occurs in a small fraction of the dataset---12,405 out of 652,139 conversations. ### Licensing Information WildChat is made available under the [**AI2 ImpACT License - Low Risk Artifacts ("LR Agreement")**](https://allenai.org/licenses/impact-lr) ### Citation Information Please consider citing [our paper](https://openreview.net/forum?id=Bl8u7ZRlbM) if you find this dataset useful: ``` @inproceedings{ zhao2024inthewildchat, title={(InThe)WildChat: 570K Chat{GPT} Interaction Logs In The Wild}, author={Zhao, Wenting and Ren, Xiang and Hessel, Jack and Cardie, Claire and Choi, Yejin and Deng, Yuntian}, booktitle={The Twelfth International Conference on Learning Representations}, year={2024}, url={https://openreview.net/forum?id=Bl8u7ZRlbM} } ```
allenai/WildChat
[ "task_categories:conversational", "task_categories:text-generation", "task_categories:question-answering", "size_categories:100K<n<1M", "not-for-all-audiences", "instruction-finetuning", "region:us" ]
2023-10-27T22:53:36+00:00
{"size_categories": ["100K<n<1M"], "task_categories": ["conversational", "text-generation", "question-answering"], "pretty_name": "WildChat", "dataset_info": {"features": [{"name": "conversation_id", "dtype": "string"}, {"name": "model", "dtype": "string"}, {"name": "timestamp", "dtype": "timestamp[s, tz=UTC]"}, {"name": "conversation", "list": [{"name": "content", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "redacted", "dtype": "bool"}, {"name": "role", "dtype": "string"}, {"name": "toxic", "dtype": "bool"}]}, {"name": "turn", "dtype": "int64"}, {"name": "language", "dtype": "string"}, {"name": "openai_moderation", "list": [{"name": "categories", "struct": [{"name": "harassment", "dtype": "bool"}, {"name": "harassment/threatening", "dtype": "bool"}, {"name": "hate", "dtype": "bool"}, {"name": "hate/threatening", "dtype": "bool"}, {"name": "self-harm", "dtype": "bool"}, {"name": "self-harm/instructions", "dtype": "bool"}, {"name": "self-harm/intent", "dtype": "bool"}, {"name": "sexual", "dtype": "bool"}, {"name": "sexual/minors", "dtype": "bool"}, {"name": "violence", "dtype": "bool"}, {"name": "violence/graphic", "dtype": "bool"}]}, {"name": "category_scores", "struct": [{"name": "harassment", "dtype": "float64"}, {"name": "harassment/threatening", "dtype": "float64"}, {"name": "hate", "dtype": "float64"}, {"name": "hate/threatening", "dtype": "float64"}, {"name": "self-harm", "dtype": "float64"}, {"name": "self-harm/instructions", "dtype": "float64"}, {"name": "self-harm/intent", "dtype": "float64"}, {"name": "sexual", "dtype": "float64"}, {"name": "sexual/minors", "dtype": "float64"}, {"name": "violence", "dtype": "float64"}, {"name": "violence/graphic", "dtype": "float64"}]}, {"name": "flagged", "dtype": "bool"}]}, {"name": "detoxify_moderation", "list": [{"name": "identity_attack", "dtype": "float32"}, {"name": "insult", "dtype": "float32"}, {"name": "obscene", "dtype": "float32"}, {"name": "severe_toxicity", "dtype": "float32"}, {"name": "sexual_explicit", "dtype": "float32"}, {"name": "threat", "dtype": "float32"}, {"name": "toxicity", "dtype": "float32"}]}, {"name": "toxic", "dtype": "bool"}, {"name": "redacted", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 3900538458, "num_examples": 652139}], "download_size": 2102684185, "dataset_size": 3900538458}, "extra_gated_prompt": "Access to this dataset is automatically granted upon accepting the [**AI2 ImpACT License - Low Risk Artifacts (\u201cLR Agreement\u201d)**](https://allenai.org/licenses/impact-lr) and completing all fields below.", "extra_gated_fields": {"Your full name": "text", "Organization or entity you are affiliated with": "text", "State or country you are located in": "text", "Contact email": "text", "Please describe your intended use of the low risk artifact(s)": "text", "I AGREE to the terms and conditions of the LR Agreement above": "checkbox", "I AGREE to AI2\u2019s use of my information for legal notices and administrative matters": "checkbox", "I CERTIFY that the information I have provided is true and accurate": "checkbox"}, "tags": ["not-for-all-audiences", "instruction-finetuning"]}
2024-01-17T01:49:35+00:00
[]
[]
TAGS #task_categories-conversational #task_categories-text-generation #task_categories-question-answering #size_categories-100K<n<1M #not-for-all-audiences #instruction-finetuning #region-us
# Dataset Card for WildChat ## Dataset Description - Paper: URL - License: URL - Language(s) (NLP): multi-lingual - Point of Contact: Yuntian Deng ### Dataset Summary WildChat is a collection of 650K conversations between human users and ChatGPT. We collected WildChat by offering online users free access to OpenAI's GPT-3.5 and GPT-4. The dataset contains a broad spectrum of user-chatbot interactions that are not previously covered by other instruction fine-tuning datasets: for example, interactions include ambiguous user requests, code-switching, topic-switching, political discussions, etc. WildChat can serve both as a dataset for instructional fine-tuning and as a valuable resource for studying user behaviors. Note that this dataset contains toxic user inputs/ChatGPT responses. A nontoxic subset of this dataest can be found here. WildChat has been openly released under AI2's ImpACT license as a low-risk artifact. The use of WildChat to cause harm is strictly prohibited. ### Languages 66 languages were detected in WildChat. ### Personal and Sensitive Information The data has been de-identified with Microsoft Presidio and hand-written rules by the authors. ### Data Fields - 'conversation_id' (string): Each conversation has a unique id. - 'model' (string): The underlying OpenAI model, such as gpt-3.5-turbo or gpt-4. - 'timestamp' (timestamp): The timestamp of the last turn in the conversation in UTC. - 'conversation' (list): A list of user/assistant utterances. Each utterance is a dictionary containing the 'role' of the speaker (user or assistant), the 'content' of the utterance, the detected 'language' of the utterance, whether the content of the utterance is considered 'toxic', and whether PII has been detected and anonymized ('redacted'). - 'turn' (int): The number of turns in the conversation. A turn refers to one round of user-assistant interaction. - 'language' (string): The language of the conversation. Note that this is the most frequently detected language in the utterances of the conversation. - 'openai_moderation' (list): A list of OpenAI Moderation results. Each element in the list corresponds to one utterance in the conversation. - 'detoxify_moderation' (list): A list of Detoxify results. Each element in the list corresponds to one utterance in the conversation. - 'toxic' (bool): Whether this conversation contains any utterances considered to be toxic by either OpenAI Moderation or Detoxify. - 'redacted' (bool): Whether this conversation contains any utterances in which PII is detected and anonymized. ### Empty User Inputs This dataset includes a small subset of conversations where users submitted empty inputs, sometimes leading to hallucinated responses from the assistant. This issue, first noticed by @yuchenlin, arises from the design of our Huggingface chatbot used for data collection, which did not restrict the submission of empty inputs. As a result, users could submit without entering any text, causing the assistant to generate responses without any user prompts. This occurs in a small fraction of the dataset---12,405 out of 652,139 conversations. ### Licensing Information WildChat is made available under the AI2 ImpACT License - Low Risk Artifacts ("LR Agreement") Please consider citing our paper if you find this dataset useful:
[ "# Dataset Card for WildChat", "## Dataset Description\n \n- Paper: URL\n\n- License: URL\n\n- Language(s) (NLP): multi-lingual\n\n- Point of Contact: Yuntian Deng", "### Dataset Summary\n\nWildChat is a collection of 650K conversations between human users and ChatGPT. We collected WildChat by offering online users free access to OpenAI's GPT-3.5 and GPT-4. The dataset contains a broad spectrum of user-chatbot interactions that are not previously covered by other instruction fine-tuning datasets: for example, interactions include ambiguous user requests, code-switching, topic-switching, political discussions, etc. WildChat can serve both as a dataset for instructional fine-tuning and as a valuable resource for studying user behaviors. Note that this dataset contains toxic user inputs/ChatGPT responses. A nontoxic subset of this dataest can be found here.\n\nWildChat has been openly released under AI2's ImpACT license as a low-risk artifact. The use of WildChat to cause harm is strictly prohibited.", "### Languages\n\n66 languages were detected in WildChat.", "### Personal and Sensitive Information\n\nThe data has been de-identified with Microsoft Presidio and hand-written rules by the authors.", "### Data Fields\n\n- 'conversation_id' (string): Each conversation has a unique id.\n- 'model' (string): The underlying OpenAI model, such as gpt-3.5-turbo or gpt-4.\n- 'timestamp' (timestamp): The timestamp of the last turn in the conversation in UTC.\n- 'conversation' (list): A list of user/assistant utterances. Each utterance is a dictionary containing the 'role' of the speaker (user or assistant), the 'content' of the utterance, the detected 'language' of the utterance, whether the content of the utterance is considered 'toxic', and whether PII has been detected and anonymized ('redacted').\n- 'turn' (int): The number of turns in the conversation. A turn refers to one round of user-assistant interaction.\n- 'language' (string): The language of the conversation. Note that this is the most frequently detected language in the utterances of the conversation.\n- 'openai_moderation' (list): A list of OpenAI Moderation results. Each element in the list corresponds to one utterance in the conversation.\n- 'detoxify_moderation' (list): A list of Detoxify results. Each element in the list corresponds to one utterance in the conversation.\n- 'toxic' (bool): Whether this conversation contains any utterances considered to be toxic by either OpenAI Moderation or Detoxify.\n- 'redacted' (bool): Whether this conversation contains any utterances in which PII is detected and anonymized.", "### Empty User Inputs\n\nThis dataset includes a small subset of conversations where users submitted empty inputs, sometimes leading to hallucinated responses from the assistant. This issue, first noticed by @yuchenlin, arises from the design of our Huggingface chatbot used for data collection, which did not restrict the submission of empty inputs. As a result, users could submit without entering any text, causing the assistant to generate responses without any user prompts. This occurs in a small fraction of the dataset---12,405 out of 652,139 conversations.", "### Licensing Information\n\nWildChat is made available under the AI2\n ImpACT License - Low Risk Artifacts (\"LR\n Agreement\")\n\n\n\nPlease consider citing our paper if you find this dataset useful:" ]
[ "TAGS\n#task_categories-conversational #task_categories-text-generation #task_categories-question-answering #size_categories-100K<n<1M #not-for-all-audiences #instruction-finetuning #region-us \n", "# Dataset Card for WildChat", "## Dataset Description\n \n- Paper: URL\n\n- License: URL\n\n- Language(s) (NLP): multi-lingual\n\n- Point of Contact: Yuntian Deng", "### Dataset Summary\n\nWildChat is a collection of 650K conversations between human users and ChatGPT. We collected WildChat by offering online users free access to OpenAI's GPT-3.5 and GPT-4. The dataset contains a broad spectrum of user-chatbot interactions that are not previously covered by other instruction fine-tuning datasets: for example, interactions include ambiguous user requests, code-switching, topic-switching, political discussions, etc. WildChat can serve both as a dataset for instructional fine-tuning and as a valuable resource for studying user behaviors. Note that this dataset contains toxic user inputs/ChatGPT responses. A nontoxic subset of this dataest can be found here.\n\nWildChat has been openly released under AI2's ImpACT license as a low-risk artifact. The use of WildChat to cause harm is strictly prohibited.", "### Languages\n\n66 languages were detected in WildChat.", "### Personal and Sensitive Information\n\nThe data has been de-identified with Microsoft Presidio and hand-written rules by the authors.", "### Data Fields\n\n- 'conversation_id' (string): Each conversation has a unique id.\n- 'model' (string): The underlying OpenAI model, such as gpt-3.5-turbo or gpt-4.\n- 'timestamp' (timestamp): The timestamp of the last turn in the conversation in UTC.\n- 'conversation' (list): A list of user/assistant utterances. Each utterance is a dictionary containing the 'role' of the speaker (user or assistant), the 'content' of the utterance, the detected 'language' of the utterance, whether the content of the utterance is considered 'toxic', and whether PII has been detected and anonymized ('redacted').\n- 'turn' (int): The number of turns in the conversation. A turn refers to one round of user-assistant interaction.\n- 'language' (string): The language of the conversation. Note that this is the most frequently detected language in the utterances of the conversation.\n- 'openai_moderation' (list): A list of OpenAI Moderation results. Each element in the list corresponds to one utterance in the conversation.\n- 'detoxify_moderation' (list): A list of Detoxify results. Each element in the list corresponds to one utterance in the conversation.\n- 'toxic' (bool): Whether this conversation contains any utterances considered to be toxic by either OpenAI Moderation or Detoxify.\n- 'redacted' (bool): Whether this conversation contains any utterances in which PII is detected and anonymized.", "### Empty User Inputs\n\nThis dataset includes a small subset of conversations where users submitted empty inputs, sometimes leading to hallucinated responses from the assistant. This issue, first noticed by @yuchenlin, arises from the design of our Huggingface chatbot used for data collection, which did not restrict the submission of empty inputs. As a result, users could submit without entering any text, causing the assistant to generate responses without any user prompts. This occurs in a small fraction of the dataset---12,405 out of 652,139 conversations.", "### Licensing Information\n\nWildChat is made available under the AI2\n ImpACT License - Low Risk Artifacts (\"LR\n Agreement\")\n\n\n\nPlease consider citing our paper if you find this dataset useful:" ]
[ 66, 7, 34, 212, 14, 31, 371, 131, 42 ]
[ "passage: TAGS\n#task_categories-conversational #task_categories-text-generation #task_categories-question-answering #size_categories-100K<n<1M #not-for-all-audiences #instruction-finetuning #region-us \n# Dataset Card for WildChat## Dataset Description\n \n- Paper: URL\n\n- License: URL\n\n- Language(s) (NLP): multi-lingual\n\n- Point of Contact: Yuntian Deng### Dataset Summary\n\nWildChat is a collection of 650K conversations between human users and ChatGPT. We collected WildChat by offering online users free access to OpenAI's GPT-3.5 and GPT-4. The dataset contains a broad spectrum of user-chatbot interactions that are not previously covered by other instruction fine-tuning datasets: for example, interactions include ambiguous user requests, code-switching, topic-switching, political discussions, etc. WildChat can serve both as a dataset for instructional fine-tuning and as a valuable resource for studying user behaviors. Note that this dataset contains toxic user inputs/ChatGPT responses. A nontoxic subset of this dataest can be found here.\n\nWildChat has been openly released under AI2's ImpACT license as a low-risk artifact. The use of WildChat to cause harm is strictly prohibited.### Languages\n\n66 languages were detected in WildChat.### Personal and Sensitive Information\n\nThe data has been de-identified with Microsoft Presidio and hand-written rules by the authors." ]
48de990f2d1240be41aa1b6ba2dc07297e3d0dc9
Original, raw data can be found in Gene Expression Omnibus (GEO) https://www.ncbi.nlm.nih.gov/geo/
mwinn99/GPL570
[ "size_categories:100K<n<1M", "license:odbl", "biology", "region:us" ]
2023-10-27T23:16:04+00:00
{"license": "odbl", "size_categories": ["100K<n<1M"], "tags": ["biology"]}
2024-01-05T21:19:56+00:00
[]
[]
TAGS #size_categories-100K<n<1M #license-odbl #biology #region-us
Original, raw data can be found in Gene Expression Omnibus (GEO) URL
[]
[ "TAGS\n#size_categories-100K<n<1M #license-odbl #biology #region-us \n" ]
[ 27 ]
[ "passage: TAGS\n#size_categories-100K<n<1M #license-odbl #biology #region-us \n" ]
7be4ae2d4a21032e67530485007d1c894bed7434
## Introduction **M4LE** is a **M**ulti-ability, **M**ulti-range, **M**ulti-task, bilingual benchmark for long-context evaluation. We categorize long-context understanding into five distinct abilities by considering whether it is required to identify single or multiple spans in long contexts based on explicit or semantic hints. Specifically, these abilities are explicit single-span, semantic single-span, explicit multiple-span, semantic multiple-span, and global. Different from previous long-context benchmark that simply compile from a set of existing long NLP benchmarks, we introduce an automated method to transform short-sequence tasks into a comprehensive long-sequence scenario encompassing all these capabilities. M4LE consists of 36 tasks, covering 11 task types and 12 domains. For each task, we construct 200 instances for each context length bucket (1K, 2K, 4K, 6K, 8K, 12K, 16K, 24K, 32K). Due to computation and cost constraints, our paper evaluated 11 well-established LLMs on instances up to the 8K context length bucket. For more details, please refer to the paper available at <https://arxiv.org/abs/2310.19240>. You can also explore the Github page at <https://github.com/KwanWaiChung/M4LE>. ## Usage You can load the dataset by specifying the task name: ```python from datasets import load_dataset tasks = [ "arxiv", "bigpatent_global_cls", "bigpatent_global_sum", "booksum", "c3", "cepsum", "clts+", "cnewsum", "cnnnews", "drcd_explicit-single", "drcd_semantic-single", "duorc", "dureader", "hotpotqa", "lcsts", "marc", "mnds-news_explicit-single", "mnds-news_explicit-multiple", "mnds-news_semantic-multiple", "ncls", "news-commentary-en2zh", "news-commentary-zh2en", "news2016", "newsqa", "nq-open", "online-shopping", "open-subtitles-en2zh", "open-subtitles-zh2en", "pubmed", "tedtalks-en2zh", "tedtalks-zh2en", "thucnews_explicit-single", "thucnews_explicit-multiple", "thucnews_semantic-multiple", "triviaqa", "wiki2019zh", "wikihow", "wikitext-103", "wow", ] for task in tasks: data = load_dataset('wckwan/M4LE', task, split='test') ``` ## Format Each testing instance follows this format: ```yaml { "instruction": "<task description>", "input": "<task input with one-shot example>", "answers": ["<answer1>", "<answer2>"], "input_length": <int, number of words in instruction and input separated by space>, "total_length": <int, number of words in instruction, input and gold answer separated by space>, "length_bucket": <int, the length bucket to which this instance belongs> } ``` ## Tasks Here is the full list for the tasks with their descriptions. More details about these tasks, please refer to the paper . Ability | Task Name | Task Type | Language | Description ----------------- | ------------------------------------------- | ---------- | -------- | ------------------------------------------------------------------ Explicit Single | mnds-news_explicit-single | CLS + RET | En | Classify a specified news article. Explicit Single | thucnews_explicit-single | CLS + RET | Zh | Classify a specified news article. Explicit Single | newsqa | QA + RET | En | Answer a question based on a specified news article. Explicit Single | c3 | QA + RET | Zh | Answer a multi-choice question based on a textbook extract. Explicit Single | wow | RET | En | Return the ID of the article related to a specified topic. Explicit Single | drcd_explicit-single | RET | Zh | Return the ID of the article related to a specified topic. Explicit Single | cnnnews | SUM + RET | En | Summarize a specified news article. Explicit Single | cepsum | SUM + RET | Zh | Summarize a specified product description. Explicit Single | lcsts | SUM + RET | Zh | Summarize a specified news article. Explicit Single | ncls | SUM + RET | En, Zh | Summarize a specified news article. Explicit Multiple | mnds-news_explicit-multiple | CLS + RET | En | Return the IDs of all the articles belong to a specified class. Explicit Multiple | thucnews_explicit-multiple | CLS + RET | Zh | Return the IDs of all the articles belong to a specified class. Explicit Multiple | marc | CLS + RET | En, Zh | Return the IDs of all the positive product reviews. Explicit Multiple | online-shopping | CLS + RET | Zh | Return the IDs of all the positive product reviews. Semantic Single | wikitext-103 | NLI + RET | En | Return the ID of the paragraph that continues a query paragraph. Semantic Single | wiki2019zh | NLI + RET | Zh | Return the ID of the paragraph that continues a query paragraph. Semantic Single | duorc | QA | En | Answer a question based on multiple movie plots. Semantic Single | nq-open | QA | En | Answer a question based on multiple wikipedia paragraphs. Semantic Single | dureader | QA | Zh | Answer a question based on multiple web snippets. Semantic Single | drcd_semantic-single | QA | Zh | Answer a question based on multiple wikipedia paragraphs. Semantic Single | wikihow | SUM + RET | En | Summarize an article based on a given topic. Semantic Single | news2016 | SUM + RET | Zh | Summarize a news article based on a given title. Semantic Single | tedtalks-en2zh/tedtalks-zh2en | TRAN + RET | En, Zh | Translate a Ted Talk transcript based on a given title. Semantic Multiple | mnds-news_semantic-multiple | CLS + CNT | En | Return the number of news articles belonging to a specified class. Semantic Multiple | thucnews_semantic-multiple | CLS + CNT | Zh | Return the number of news articles belonging to a specified class. Semantic Multiple | hotpotqa | QA | En | Answer a question based on multiple wikipedia paragraphs. Global | bigpatent_global_cls | CLS | En | Classify a patent document. Global | triviaqa | QA | En | Answer a question based on a web snippet. Global | arxiv | SUM | En | Summarize an academic paper. Global | bigpatent_global_sum | SUM | En | Summarize a patent document. Global | pubmed | SUM | En | Summarize a medical paper. Global | booksum | SUM | En | Summarize one or more chapters of a book. Global | cnewsum | SUM | Zh | Summarize a news article. Global | clts+ | SUM | Zh | Summarize a news article. Global | open-subtitles-en2zh/open-subtitles-zh2en | TRAN | En, Zh | Translate the movie subtitles. Global | news-commentary-en2zh/news-commentary-zh2en | TRAN | En, Zh | Translate the movie subtitles. ## Citation If you find our paper and resources useful, please consider citing our paper: ```bibtex @misc{kwan_m4le_2023, title = {{{M4LE}}: {{A Multi-Ability Multi-Range Multi-Task Multi-Domain Long-Context Evaluation Benchmark}} for {{Large Language Models}}}, author = {Kwan, Wai-Chung and Zeng, Xingshan and Wang, Yufei and Sun, Yusen and Li, Liangyou and Shang, Lifeng and Liu, Qun and Wong, Kam-Fai}, year = {2023}, } ```
wckwan/M4LE
[ "task_categories:question-answering", "task_categories:translation", "task_categories:summarization", "task_categories:text-classification", "task_categories:text-retrieval", "size_categories:1K<n<10K", "language:en", "language:zh", "license:mit", "Long Context", "arxiv:2310.19240", "region:us" ]
2023-10-27T23:34:52+00:00
{"language": ["en", "zh"], "license": "mit", "size_categories": ["1K<n<10K"], "task_categories": ["question-answering", "translation", "summarization", "text-classification", "text-retrieval"], "tags": ["Long Context"]}
2023-11-19T04:13:34+00:00
[ "2310.19240" ]
[ "en", "zh" ]
TAGS #task_categories-question-answering #task_categories-translation #task_categories-summarization #task_categories-text-classification #task_categories-text-retrieval #size_categories-1K<n<10K #language-English #language-Chinese #license-mit #Long Context #arxiv-2310.19240 #region-us
Introduction ------------ M4LE is a Multi-ability, Multi-range, Multi-task, bilingual benchmark for long-context evaluation. We categorize long-context understanding into five distinct abilities by considering whether it is required to identify single or multiple spans in long contexts based on explicit or semantic hints. Specifically, these abilities are explicit single-span, semantic single-span, explicit multiple-span, semantic multiple-span, and global. Different from previous long-context benchmark that simply compile from a set of existing long NLP benchmarks, we introduce an automated method to transform short-sequence tasks into a comprehensive long-sequence scenario encompassing all these capabilities. M4LE consists of 36 tasks, covering 11 task types and 12 domains. For each task, we construct 200 instances for each context length bucket (1K, 2K, 4K, 6K, 8K, 12K, 16K, 24K, 32K). Due to computation and cost constraints, our paper evaluated 11 well-established LLMs on instances up to the 8K context length bucket. For more details, please refer to the paper available at <URL You can also explore the Github page at <URL Usage ----- You can load the dataset by specifying the task name: Format ------ Each testing instance follows this format: Tasks ----- Here is the full list for the tasks with their descriptions. More details about these tasks, please refer to the paper . If you find our paper and resources useful, please consider citing our paper:
[]
[ "TAGS\n#task_categories-question-answering #task_categories-translation #task_categories-summarization #task_categories-text-classification #task_categories-text-retrieval #size_categories-1K<n<10K #language-English #language-Chinese #license-mit #Long Context #arxiv-2310.19240 #region-us \n" ]
[ 100 ]
[ "passage: TAGS\n#task_categories-question-answering #task_categories-translation #task_categories-summarization #task_categories-text-classification #task_categories-text-retrieval #size_categories-1K<n<10K #language-English #language-Chinese #license-mit #Long Context #arxiv-2310.19240 #region-us \n" ]
44718562a085e763735eb69f18de1dc8fe499ebb
rows 10m to 10.1m in the DSIR pile
georgeyw/dsir-pile-100k
[ "region:us" ]
2023-10-28T00:33:32+00:00
{}
2023-12-22T22:33:53+00:00
[]
[]
TAGS #region-us
rows 10m to 10.1m in the DSIR pile
[]
[ "TAGS\n#region-us \n" ]
[ 6 ]
[ "passage: TAGS\n#region-us \n" ]
06286d7329f9838dce99087c87dfe8135c4a731d
# Dataset Card for "GRE_all_text" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
chirunder/GRE_all_text
[ "region:us" ]
2023-10-28T00:47:01+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 5668464, "num_examples": 1}], "download_size": 2779298, "dataset_size": 5668464}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-28T00:47:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "GRE_all_text" More Information needed
[ "# Dataset Card for \"GRE_all_text\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"GRE_all_text\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"GRE_all_text\"\n\nMore Information needed" ]
2215643932118779996fba85a71726bf92141595
<div align="center"> <img width="640" alt="DanielCerda/pid-object-detection" src="https://huggingface.co/datasets/DanielCerda/pid-object-detection/resolve/main/thumbnail.jpg"> </div> ### Dataset Labels ``` ['ball-valve', 'butterfly-valve', 'centrifugal-pump', 'check-valve', 'gate-valve'] ``` ### Number of Images ```json {'valid': 12, 'test': 12, 'train': 128} ``` ### How to Use - Install [datasets](https://pypi.org/project/datasets/): ```bash pip install datasets ``` - Load the dataset: ```python from datasets import load_dataset ds = load_dataset("DanielCerda/pid-object-detection", name="full") example = ds['train'][0] ``` ### Roboflow Dataset Page [https://universe.roboflow.com/pid-smart-reader/pid_dataset/dataset/2](https://universe.roboflow.com/pid-smart-reader/pid_dataset/dataset/2?ref=roboflow2huggingface) ### Citation ``` @misc{ pid_dataset_dataset, title = { pid_dataset Dataset }, type = { Open Source Dataset }, author = { PID Smart Reader }, howpublished = { \\url{ https://universe.roboflow.com/pid-smart-reader/pid_dataset } }, url = { https://universe.roboflow.com/pid-smart-reader/pid_dataset }, journal = { Roboflow Universe }, publisher = { Roboflow }, year = { 2023 }, month = { feb }, note = { visited on 2023-10-28 }, } ``` ### License CC BY 4.0 ### Dataset Summary This dataset was exported via roboflow.com on February 10, 2023 at 3:14 AM GMT Roboflow is an end-to-end computer vision platform that helps you * collaborate with your team on computer vision projects * collect & organize images * understand and search unstructured image data * annotate, and create datasets * export, train, and deploy computer vision models * use active learning to improve your dataset over time For state of the art Computer Vision training notebooks you can use with this dataset, visit https://github.com/roboflow/notebooks To find over 100k other datasets and pre-trained models, visit https://universe.roboflow.com The dataset includes 152 images. Piping-elements are annotated in COCO format. The following pre-processing was applied to each image: No image augmentation techniques were applied.
DanielCerda/pid-object-detection
[ "task_categories:object-detection", "roboflow", "roboflow2huggingface", "region:us" ]
2023-10-28T00:48:38+00:00
{"task_categories": ["object-detection"], "tags": ["roboflow", "roboflow2huggingface"]}
2023-10-28T00:50:16+00:00
[]
[]
TAGS #task_categories-object-detection #roboflow #roboflow2huggingface #region-us
<div align="center"> <img width="640" alt="DanielCerda/pid-object-detection" src="URL </div> ### Dataset Labels ### Number of Images ### How to Use - Install datasets: - Load the dataset: ### Roboflow Dataset Page URL ### License CC BY 4.0 ### Dataset Summary This dataset was exported via URL on February 10, 2023 at 3:14 AM GMT Roboflow is an end-to-end computer vision platform that helps you * collaborate with your team on computer vision projects * collect & organize images * understand and search unstructured image data * annotate, and create datasets * export, train, and deploy computer vision models * use active learning to improve your dataset over time For state of the art Computer Vision training notebooks you can use with this dataset, visit URL To find over 100k other datasets and pre-trained models, visit URL The dataset includes 152 images. Piping-elements are annotated in COCO format. The following pre-processing was applied to each image: No image augmentation techniques were applied.
[ "### Dataset Labels", "### Number of Images", "### How to Use\n\n- Install datasets:\n\n\n\n- Load the dataset:", "### Roboflow Dataset Page\nURL", "### License\nCC BY 4.0", "### Dataset Summary\nThis dataset was exported via URL on February 10, 2023 at 3:14 AM GMT\n\nRoboflow is an end-to-end computer vision platform that helps you\n* collaborate with your team on computer vision projects\n* collect & organize images\n* understand and search unstructured image data\n* annotate, and create datasets\n* export, train, and deploy computer vision models\n* use active learning to improve your dataset over time\n\nFor state of the art Computer Vision training notebooks you can use with this dataset,\nvisit URL\n\nTo find over 100k other datasets and pre-trained models, visit URL\n\nThe dataset includes 152 images.\nPiping-elements are annotated in COCO format.\n\nThe following pre-processing was applied to each image:\n\nNo image augmentation techniques were applied." ]
[ "TAGS\n#task_categories-object-detection #roboflow #roboflow2huggingface #region-us \n", "### Dataset Labels", "### Number of Images", "### How to Use\n\n- Install datasets:\n\n\n\n- Load the dataset:", "### Roboflow Dataset Page\nURL", "### License\nCC BY 4.0", "### Dataset Summary\nThis dataset was exported via URL on February 10, 2023 at 3:14 AM GMT\n\nRoboflow is an end-to-end computer vision platform that helps you\n* collaborate with your team on computer vision projects\n* collect & organize images\n* understand and search unstructured image data\n* annotate, and create datasets\n* export, train, and deploy computer vision models\n* use active learning to improve your dataset over time\n\nFor state of the art Computer Vision training notebooks you can use with this dataset,\nvisit URL\n\nTo find over 100k other datasets and pre-trained models, visit URL\n\nThe dataset includes 152 images.\nPiping-elements are annotated in COCO format.\n\nThe following pre-processing was applied to each image:\n\nNo image augmentation techniques were applied." ]
[ 27, 5, 5, 18, 8, 6, 175 ]
[ "passage: TAGS\n#task_categories-object-detection #roboflow #roboflow2huggingface #region-us \n### Dataset Labels### Number of Images### How to Use\n\n- Install datasets:\n\n\n\n- Load the dataset:### Roboflow Dataset Page\nURL### License\nCC BY 4.0### Dataset Summary\nThis dataset was exported via URL on February 10, 2023 at 3:14 AM GMT\n\nRoboflow is an end-to-end computer vision platform that helps you\n* collaborate with your team on computer vision projects\n* collect & organize images\n* understand and search unstructured image data\n* annotate, and create datasets\n* export, train, and deploy computer vision models\n* use active learning to improve your dataset over time\n\nFor state of the art Computer Vision training notebooks you can use with this dataset,\nvisit URL\n\nTo find over 100k other datasets and pre-trained models, visit URL\n\nThe dataset includes 152 images.\nPiping-elements are annotated in COCO format.\n\nThe following pre-processing was applied to each image:\n\nNo image augmentation techniques were applied." ]
c60365cc7deaa092e75559f4933b183214646230
# Dataset Card for "GRE_all_text_word_freq" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
chirunder/GRE_all_text_word_freq
[ "region:us" ]
2023-10-28T01:20:14+00:00
{"dataset_info": {"features": [{"name": "word", "dtype": "string"}, {"name": "frequency", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 392007, "num_examples": 19836}], "download_size": 224362, "dataset_size": 392007}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-28T01:33:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "GRE_all_text_word_freq" More Information needed
[ "# Dataset Card for \"GRE_all_text_word_freq\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"GRE_all_text_word_freq\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"GRE_all_text_word_freq\"\n\nMore Information needed" ]
e60b0d4df14d72d90548c0063ec828ffeec82cbf
# Dataset Card for "soict_private_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
linhtran92/soict_private_test
[ "region:us" ]
2023-10-28T01:48:00+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 378888808.625, "num_examples": 2139}], "download_size": 351233150, "dataset_size": 378888808.625}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-28T01:48:16+00:00
[]
[]
TAGS #region-us
# Dataset Card for "soict_private_test" More Information needed
[ "# Dataset Card for \"soict_private_test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"soict_private_test\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"soict_private_test\"\n\nMore Information needed" ]
c08d88092209aab3c1dfe7934028bdaccb6d377d
# Dataset Card for "random_pre" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
banghua/random_pre
[ "region:us" ]
2023-10-28T01:48:38+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "answers", "list": [{"name": "answer", "dtype": "string"}, {"name": "model", "dtype": "string"}, {"name": "rank", "dtype": "float64"}]}, {"name": "turns", "dtype": "int64"}, {"name": "num_responses", "dtype": "int64"}, {"name": "source", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 1206940856, "num_examples": 182968}], "download_size": 551450326, "dataset_size": 1206940856}}
2023-10-28T01:55:26+00:00
[]
[]
TAGS #region-us
# Dataset Card for "random_pre" More Information needed
[ "# Dataset Card for \"random_pre\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"random_pre\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"random_pre\"\n\nMore Information needed" ]
872bf94e1ea41fdd98c98b2ad87b0a1eed70e2a2
# Dataset Card for "Vince_GRE_frequency" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
chirunder/Vince_GRE_frequency
[ "region:us" ]
2023-10-28T01:55:53+00:00
{"dataset_info": {"features": [{"name": "word", "dtype": "string"}, {"name": "frequency", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 58131, "num_examples": 2882}], "download_size": 31861, "dataset_size": 58131}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-28T01:55:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Vince_GRE_frequency" More Information needed
[ "# Dataset Card for \"Vince_GRE_frequency\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Vince_GRE_frequency\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"Vince_GRE_frequency\"\n\nMore Information needed" ]
314662ea7b77a9c73eab60ffe34c9a2ae723a4d6
# Dataset Card for "soict_private_test_fix" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
linhtran92/soict_private_test_fix
[ "region:us" ]
2023-10-28T02:20:56+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 378888808.625, "num_examples": 2139}], "download_size": 351233206, "dataset_size": 378888808.625}}
2023-10-28T02:21:14+00:00
[]
[]
TAGS #region-us
# Dataset Card for "soict_private_test_fix" More Information needed
[ "# Dataset Card for \"soict_private_test_fix\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"soict_private_test_fix\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"soict_private_test_fix\"\n\nMore Information needed" ]
eafd0200d1a9b8dcf05ff802bde137d593f6ec56
# Dataset Card for "hf_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
salma-remyx/hf_test
[ "region:us" ]
2023-10-28T02:23:45+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "cat", "1": "dog"}}}}], "splits": [{"name": "train", "num_bytes": 7544483.0, "num_examples": 16}], "download_size": 7547089, "dataset_size": 7544483.0}}
2023-10-29T23:10:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "hf_test" More Information needed
[ "# Dataset Card for \"hf_test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"hf_test\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"hf_test\"\n\nMore Information needed" ]
fc5c80873c72cab4632ee8b357f1cdbf790cf4ff
# Dataset Card for "r_pv4_wiz_all" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Minglii/r_pv4_wiz_all
[ "region:us" ]
2023-10-28T03:53:54+00:00
{"dataset_info": {"features": [{"name": "data", "struct": [{"name": "conversations", "list": [{"name": "from", "dtype": "string"}, {"name": "value", "dtype": "string"}]}, {"name": "id", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 215231808, "num_examples": 76836}], "download_size": 103377449, "dataset_size": 215231808}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-28T03:54:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "r_pv4_wiz_all" More Information needed
[ "# Dataset Card for \"r_pv4_wiz_all\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"r_pv4_wiz_all\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"r_pv4_wiz_all\"\n\nMore Information needed" ]
e4d56fcdbaa2cb1b85d86774c52651cbd8b49a76
# Dataset Card for "ts-aims-reefscapes-satellite-segmentation" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
aimsks/ts-aims-reefscapes-satellite-segmentation
[ "region:us" ]
2023-10-28T04:06:06+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "binary"}, {"name": "segmentation", "dtype": "binary"}, {"name": "class_label", "dtype": "string"}, {"name": "bbox_epsg32754", "sequence": "float64"}], "splits": [{"name": "train", "num_bytes": 1165209562, "num_examples": 1748}, {"name": "test", "num_bytes": 657217424, "num_examples": 981}, {"name": "validation", "num_bytes": 324309904, "num_examples": 487}], "download_size": 1322361933, "dataset_size": 2146736890}}
2023-10-28T04:54:32+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ts-aims-reefscapes-satellite-segmentation" More Information needed
[ "# Dataset Card for \"ts-aims-reefscapes-satellite-segmentation\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ts-aims-reefscapes-satellite-segmentation\"\n\nMore Information needed" ]
[ 6, 28 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"ts-aims-reefscapes-satellite-segmentation\"\n\nMore Information needed" ]
e908a90df9f1008ca63bb7af074f792e9b781f14
# Dataset Card for "tencentdata_speech_tokenizer" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
zion84006/tencentdata_speech_tokenizer
[ "region:us" ]
2023-10-28T04:06:44+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "valid", "path": "data/valid-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "file_id", "dtype": "int64"}, {"name": "wav_id", "dtype": "int64"}, {"name": "instruction", "dtype": "string"}, {"name": "transcription", "dtype": "string"}, {"name": "src_speech_tokenizer_0", "sequence": "int64"}, {"name": "src_speech_tokenizer_1", "sequence": "int64"}, {"name": "src_speech_tokenizer_2", "sequence": "int64"}, {"name": "src_speech_tokenizer_3", "sequence": "int64"}, {"name": "src_speech_tokenizer_4", "sequence": "int64"}, {"name": "src_speech_tokenizer_5", "sequence": "int64"}, {"name": "src_speech_tokenizer_6", "sequence": "int64"}, {"name": "src_speech_tokenizer_7", "sequence": "int64"}, {"name": "tgt_speech_tokenizer_0", "sequence": "int64"}, {"name": "tgt_speech_tokenizer_1", "sequence": "int64"}, {"name": "tgt_speech_tokenizer_2", "sequence": "int64"}, {"name": "tgt_speech_tokenizer_3", "sequence": "int64"}, {"name": "tgt_speech_tokenizer_4", "sequence": "int64"}, {"name": "tgt_speech_tokenizer_5", "sequence": "int64"}, {"name": "tgt_speech_tokenizer_6", "sequence": "int64"}, {"name": "tgt_speech_tokenizer_7", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 12406092460, "num_examples": 266780}, {"name": "valid", "num_bytes": 352367844, "num_examples": 7620}, {"name": "test", "num_bytes": 339389388, "num_examples": 7620}], "download_size": 708155490, "dataset_size": 13097849692}}
2023-11-10T08:23:25+00:00
[]
[]
TAGS #region-us
# Dataset Card for "tencentdata_speech_tokenizer" More Information needed
[ "# Dataset Card for \"tencentdata_speech_tokenizer\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"tencentdata_speech_tokenizer\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"tencentdata_speech_tokenizer\"\n\nMore Information needed" ]
ae860421a535e8288e127c59a0d2e408cc15d828
# **SD 1.5 Model Converter** <a target="_blank" href="https://colab.research.google.com/github/kieranxsomer/convert-scripts/blob/main/Converter_SD1_5_V2_Duct_TapeVersion.ipynb"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a> **A Colab Notebook To Convert SD 1.5 Checkpoint to Diffusers format** But a horribly duct taped edition. THIS IS IN ALPHA STAGES, WILL BE PATCHING THE CODE AS I GO ALONG. ♻ - USE ONLY FOR NOW: Converter_SD1_5_V2_Duct_TapeVersion.ipynb ♻ - THIS IN THEORY SHOULD WORK ON VAST/RUNPOD - BUT IT IS UNTESTED, JUST CHANGE YOUR DIRECTORIES AS NEEDED! RIGHT NOW THE INSTRUCTIONS ARE AS FOLLOWS: ♻ - Install/Clone etc ♻ - Download model - Direct port from Linaqruf. ♻ - Open code panel, replace model details. - don't move after you hit play, it does it really quickly. ♻ - Check file browser, if the model/yourmodelhere looks like a diffusers format you're good to go! ♻ - Write Token + Set up your Repo! ♻ - Upload Diffusers! --- ***Patched from*** : https://colab.research.google.com/github/Linaqruf/sdxl-model-converter/blob/main/sdxl_model_converter.ipynb ***Linaqruf @ Github***: https://github.com/Linaqruf ![visitors](https://visitor-badge.glitch.me/badge?page_id=linaqruf.lora-dreambooth) [![](https://dcbadge.vercel.app/api/shield/850007095775723532?style=flat)](https://lookup.guru/850007095775723532) [![ko-fi](https://img.shields.io/badge/Support%20me%20on%20Ko--fi-F16061?logo=ko-fi&logoColor=white&style=flat)](https://ko-fi.com/linaqruf) <a href="https://saweria.co/linaqruf"><img alt="Saweria" src="https://img.shields.io/badge/Saweria-7B3F00?style=flat&logo=ko-fi&logoColor=white"/></a> **Please use their main scripts for SDXL HERE:** | Notebook Name | Description | Link | | --- | --- | --- | | [Kohya LoRA Trainer XL](https://github.com/Linaqruf/kohya-trainer/blob/main/kohya-LoRA-trainer-XL.ipynb) | LoRA Training | [![](https://img.shields.io/static/v1?message=Open%20in%20Colab&logo=googlecolab&labelColor=5c5c5c&color=0f80c1&label=%20&style=flat)](https://colab.research.google.com/github/Linaqruf/kohya-trainer/blob/main/kohya-LoRA-trainer-XL.ipynb) | | [Kohya Trainer XL](https://github.com/Linaqruf/kohya-trainer/blob/main/kohya-trainer-XL.ipynb) | Native Training | [![](https://img.shields.io/static/v1?message=Open%20in%20Colab&logo=googlecolab&labelColor=5c5c5c&color=0f80c1&label=%20&style=flat)](https://colab.research.google.com/github/Linaqruf/kohya-trainer/blob/main/kohya-trainer-XL.ipynb) | SD 1.5 Scripts: | Notebook Name | Description | Link | V14 | | --- | --- | --- | --- | | [Kohya LoRA Dreambooth](https://github.com/Linaqruf/kohya-trainer/blob/main/kohya-LoRA-dreambooth.ipynb) | LoRA Training (Dreambooth method) | [![](https://img.shields.io/static/v1?message=Open%20in%20Colab&logo=googlecolab&labelColor=5c5c5c&color=0f80c1&label=%20&style=flat)](https://colab.research.google.com/github/Linaqruf/kohya-trainer/blob/main/kohya-LoRA-dreambooth.ipynb) | [![](https://img.shields.io/static/v1?message=Older%20Version&logo=googlecolab&labelColor=5c5c5c&color=e74c3c&label=%20&style=flat)](https://colab.research.google.com/github/Linaqruf/kohya-trainer/blob/ff701379c65380c967cd956e4e9e8f6349563878/kohya-LoRA-dreambooth.ipynb) | | [Kohya LoRA Fine-Tuning](https://github.com/Linaqruf/kohya-trainer/blob/main/kohya-LoRA-finetuner.ipynb) | LoRA Training (Fine-tune method) | [![](https://img.shields.io/static/v1?message=Open%20in%20Colab&logo=googlecolab&labelColor=5c5c5c&color=0f80c1&label=%20&style=flat)](https://colab.research.google.com/github/Linaqruf/kohya-trainer/blob/main/kohya-LoRA-finetuner.ipynb) | [![](https://img.shields.io/static/v1?message=Older%20Version&logo=googlecolab&labelColor=5c5c5c&color=e74c3c&label=%20&style=flat)](https://colab.research.google.com/github/Linaqruf/kohya-trainer/blob/ff701379c65380c967cd956e4e9e8f6349563878/kohya-LoRA-finetuner.ipynb) | | [Kohya Trainer](https://github.com/Linaqruf/kohya-trainer/blob/main/kohya-trainer.ipynb) | Native Training | [![](https://img.shields.io/static/v1?message=Open%20in%20Colab&logo=googlecolab&labelColor=5c5c5c&color=0f80c1&label=%20&style=flat)](https://colab.research.google.com/github/Linaqruf/kohya-trainer/blob/main/kohya-trainer.ipynb) | [![](https://img.shields.io/static/v1?message=Older%20Version&logo=googlecolab&labelColor=5c5c5c&color=e74c3c&label=%20&style=flat)](https://colab.research.google.com/github/Linaqruf/kohya-trainer/blob/ff701379c65380c967cd956e4e9e8f6349563878/kohya-trainer.ipynb) | | [Kohya Dreambooth](https://github.com/Linaqruf/kohya-trainer/blob/main/kohya-dreambooth.ipynb) | Dreambooth Training | [![](https://img.shields.io/static/v1?message=Open%20in%20Colab&logo=googlecolab&labelColor=5c5c5c&color=0f80c1&label=%20&style=flat)](https://colab.research.google.com/github/Linaqruf/kohya-trainer/blob/main/kohya-dreambooth.ipynb) | [![](https://img.shields.io/static/v1?message=Older%20Version&logo=googlecolab&labelColor=5c5c5c&color=e74c3c&label=%20&style=flat)](https://colab.research.google.com/github/Linaqruf/kohya-trainer/blob/ff701379c65380c967cd956e4e9e8f6349563878/kohya-dreambooth.ipynb) | Ahoy! you're looking for our Huggingface backup that is again patched from Linaqruf and others? | Notebook Name | Description | Link | | --- | --- | --- | | [Huggingface Backup](https://colab.research.google.com/github/kieranxsomer/HuggingFace_Backup/blob/main/HuggingFace_Backup.ipynb) | backup checkpoints! | [![](https://img.shields.io/static/v1?message=Open%20in%20Colab&logo=googlecolab&labelColor=5c5c5c&color=0f80c1&label=%20&style=flat)](https://colab.research.google.com/github/kieranxsomer/HuggingFace_Backup/blob/main/HuggingFace_Backup.ipynb) | [1.5 Conversions](https://github.com/kieranxsomer/convert-scripts/blob/main/Converter_SD1_5_V2_Duct_TapeVersion.ipynb) | Convert to Diffusers! | [![](https://img.shields.io/static/v1?message=Open%20in%20Colab&logo=googlecolab&labelColor=5c5c5c&color=0f80c1&label=%20&style=flat)](https://github.com/kieranxsomer/convert-scripts/blob/main/Converter_SD1_5_V2_Duct_TapeVersion.ipynb) ## Duskfall/ Earth & Dusk Socials ![Discord](https://img.shields.io/discord/1024442483750490222?label=Earth%26Dusk&style=plastic) | Social Network | Link | | --- | --- | |Discord|[Invite](https://discord.gg/5t2kYxt7An) |CivitAi|[Duskfallcrew](https://civitai.com/user/duskfallcrew/) |Huggingface|[Earth & Dusk](https://huggingface.co/EarthnDusk) |Ko-Fi| [Dusk's Kofi](https://ko-fi.com/duskfallcrew/)
EarthnDusk/SD-Convert-1-5
[ "size_categories:n<1K", "language:en", "license:creativeml-openrail-m", "code", "region:us" ]
2023-10-28T04:25:40+00:00
{"language": ["en"], "license": "creativeml-openrail-m", "size_categories": ["n<1K"], "pretty_name": "Convert SD 1.5 to Diffusers", "tags": ["code"]}
2023-10-28T04:26:48+00:00
[]
[ "en" ]
TAGS #size_categories-n<1K #language-English #license-creativeml-openrail-m #code #region-us
SD 1.5 Model Converter ====================== <a target="\_blank" href="URL <img src="URL alt="Open In Colab"/> A Colab Notebook To Convert SD 1.5 Checkpoint to Diffusers format But a horribly duct taped edition. THIS IS IN ALPHA STAGES, WILL BE PATCHING THE CODE AS I GO ALONG. * USE ONLY FOR NOW: Converter\_SD1\_5\_V2\_Duct\_TapeVersion.ipynb * THIS IN THEORY SHOULD WORK ON VAST/RUNPOD - BUT IT IS UNTESTED, JUST CHANGE YOUR DIRECTORIES AS NEEDED! RIGHT NOW THE INSTRUCTIONS ARE AS FOLLOWS: * Install/Clone etc * Download model - Direct port from Linaqruf. * Open code panel, replace model details. - don't move after you hit play, it does it really quickly. * Check file browser, if the model/yourmodelhere looks like a diffusers format you're good to go! * Write Token + Set up your Repo! * Upload Diffusers! --- *Patched from* : URL *Linaqruf @ Github*: URL !visitors ![](URL ![ko-fi](URL <a href="URL alt="Saweria" src="URL Please use their main scripts for SDXL HERE: Notebook Name: Kohya LoRA Trainer XL, Description: LoRA Training, Link: ![](URL Notebook Name: Kohya Trainer XL, Description: Native Training, Link: ![](URL SD 1.5 Scripts: Ahoy! you're looking for our Huggingface backup that is again patched from Linaqruf and others? Notebook Name: Huggingface Backup, Description: backup checkpoints!, Link: ![](URL Notebook Name: 1.5 Conversions, Description: Convert to Diffusers!, Link: ![](URL Duskfall/ Earth & Dusk Socials ------------------------------ !Discord
[]
[ "TAGS\n#size_categories-n<1K #language-English #license-creativeml-openrail-m #code #region-us \n" ]
[ 34 ]
[ "passage: TAGS\n#size_categories-n<1K #language-English #license-creativeml-openrail-m #code #region-us \n" ]
a053d286186504a7e340142f4d7c2f705e2ebfe1
# Dataset Card for "slimorca-i18n" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
winglian/slimorca-i18n
[ "region:us" ]
2023-10-28T05:26:33+00:00
{"dataset_info": {"features": [{"name": "old_conversation", "list": [{"name": "from", "dtype": "string"}, {"name": "value", "dtype": "string"}, {"name": "weight", "dtype": "float64"}]}, {"name": "conversation", "list": [{"name": "from", "dtype": "string"}, {"name": "value", "dtype": "string"}]}, {"name": "target_language", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 231913396, "num_examples": 86888}], "download_size": 126668621, "dataset_size": 231913396}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-11-03T08:40:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "slimorca-i18n" More Information needed
[ "# Dataset Card for \"slimorca-i18n\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"slimorca-i18n\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"slimorca-i18n\"\n\nMore Information needed" ]
31d5705e27c6a88ddc86eca1fbe1776c805a5a79
# Dataset Card for "imagenet-1k-rand_hog" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
acozma/imagenet-1k-rand_hog
[ "region:us" ]
2023-10-28T05:30:09+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "conditioning_image", "dtype": "image"}, {"name": "text", "dtype": "string"}, {"name": "params", "struct": [{"name": "orientations", "dtype": "int64"}, {"name": "pixels_per_cell", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 235174567045.0, "num_examples": 500000}], "download_size": 89659059126, "dataset_size": 235174567045.0}}
2023-11-01T07:18:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "imagenet-1k-rand_hog" More Information needed
[ "# Dataset Card for \"imagenet-1k-rand_hog\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"imagenet-1k-rand_hog\"\n\nMore Information needed" ]
[ 6, 19 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"imagenet-1k-rand_hog\"\n\nMore Information needed" ]
6ae747cd92c76cd61eddd296a97f0897af08ea7b
# Dataset Card for "soict_private_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
thanhduycao/soict_private_test
[ "region:us" ]
2023-10-28T05:31:03+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "audio", "sequence": "float32"}], "splits": [{"name": "train", "num_bytes": 567721148, "num_examples": 2139}], "download_size": 461186944, "dataset_size": 567721148}}
2023-10-28T05:31:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "soict_private_test" More Information needed
[ "# Dataset Card for \"soict_private_test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"soict_private_test\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"soict_private_test\"\n\nMore Information needed" ]
8494c5decb884fe14dc81f3f0954c0bf564e1e2f
# Dataset Card for "test_1028_v1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
li-ping/test_1028_v1
[ "region:us" ]
2023-10-28T05:33:46+00:00
{"dataset_info": {"features": [{"name": "set", "struct": [{"name": "neg", "sequence": "string"}, {"name": "pos", "sequence": "string"}, {"name": "query", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 2593205, "num_examples": 1848}], "download_size": 120725, "dataset_size": 2593205}}
2023-10-28T08:25:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "test_1028_v1" More Information needed
[ "# Dataset Card for \"test_1028_v1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"test_1028_v1\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"test_1028_v1\"\n\nMore Information needed" ]
ae08c38be295bb1e785049b997778d3cef3988f5
# Dataset Card for "soict_private_test_v1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
thanhduycao/soict_private_test_v1
[ "region:us" ]
2023-10-28T05:37:35+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "audio", "struct": [{"name": "array", "sequence": "float32"}, {"name": "path", "dtype": "string"}, {"name": "sampling_rate", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 567746816, "num_examples": 2139}], "download_size": 461190048, "dataset_size": 567746816}}
2023-10-28T05:49:23+00:00
[]
[]
TAGS #region-us
# Dataset Card for "soict_private_test_v1" More Information needed
[ "# Dataset Card for \"soict_private_test_v1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"soict_private_test_v1\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"soict_private_test_v1\"\n\nMore Information needed" ]
e932916b84244b486010a6f27ed31eea70c82b5d
# Dataset Card for "soict_private_test_v2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
thanhduycao/soict_private_test_v2
[ "region:us" ]
2023-10-28T05:51:47+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "audio", "struct": [{"name": "array", "sequence": "float32"}, {"name": "path", "dtype": "string"}, {"name": "sampling_rate", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 567746816, "num_examples": 2139}], "download_size": 461190048, "dataset_size": 567746816}}
2023-10-28T05:52:08+00:00
[]
[]
TAGS #region-us
# Dataset Card for "soict_private_test_v2" More Information needed
[ "# Dataset Card for \"soict_private_test_v2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"soict_private_test_v2\"\n\nMore Information needed" ]
[ 6, 21 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"soict_private_test_v2\"\n\nMore Information needed" ]
d37dcd05f6605cb9d4ec717b66ebcca96f83599b
# Dataset Card for "prm800k-llama-v2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
parksimon0808/prm800k-mistral-verifier
[ "region:us" ]
2023-10-28T06:07:03+00:00
{"dataset_info": {"features": [{"name": "texts", "dtype": "string"}, {"name": "input_ids", "sequence": "int32"}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 4539556004, "num_examples": 1052290}, {"name": "test", "num_bytes": 145304218, "num_examples": 32408}], "download_size": 342834121, "dataset_size": 4684860222}}
2023-11-08T21:43:52+00:00
[]
[]
TAGS #region-us
# Dataset Card for "prm800k-llama-v2" More Information needed
[ "# Dataset Card for \"prm800k-llama-v2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"prm800k-llama-v2\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"prm800k-llama-v2\"\n\nMore Information needed" ]
cbe27d03df87e6195063c0d9525a70592f4cfd8d
# Dataset Card for "enamine_natural_products" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
phanvancongthanh/enamine_natural_products
[ "region:us" ]
2023-10-28T06:46:15+00:00
{"dataset_info": {"features": [{"name": "smiles", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 8749525383, "num_examples": 156999999}], "download_size": 3533601644, "dataset_size": 8749525383}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-29T00:52:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "enamine_natural_products" More Information needed
[ "# Dataset Card for \"enamine_natural_products\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"enamine_natural_products\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"enamine_natural_products\"\n\nMore Information needed" ]
81bae08360863957f2fec2c1dacca2978866a6d3
## Dataset Card: Instruction-Based English-Nepali Translation Dataset ### Dataset Description This dataset consists of English-Nepali parallel sentences converted into an instruction-based format. Each entry prompts the model to translate a given sentence from English to Nepali or vice versa. ### Source Data **Original Dataset**: English-Nepali Parallel Sentences **Paper**: [NepBERTa: Nepali Language Model Trained in a Large Corpus](https://aura.abdn.ac.uk/bitstream/handle/2164/21465/Timilsina_etal_ACLA_NepNERTa_VOR.pdf) **Authors**: Milan Gautam, Sulav Timilsina, Binod Bhattarai **Conference**: Proceedings of the 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing (Volume 2: Short Papers) ### Dataset Format Each entry in the dataset has the following format: ``` [INST] Please translate "sentence in source language" into target language [/INST] translation in target language ``` The dataset supports both English to Nepali and Nepali to English translations. ### Intended Use This dataset is designed for fine-tuning models on instruction-based translation tasks, especially suited for models like Llama Instruct. It can be used to develop models capable of translating between English and Nepali using instruction-based prompts. ### Data Collection The data was derived from the English-Nepali parallel corpus presented in the NepBERTa paper. The sentences were then converted into an instruction-based format to facilitate training with instruction-based models. ### Limitations - The dataset's performance and utility are tied to the quality of the original English-Nepali corpus. - The instruction-based format may introduce some redundancy and might not be ideal for all NLP tasks or models. ### Licensing Ensure you have the right to share the data and understand any licensing implications. Mention the dataset's licensing terms here. ---
ashokpoudel/English-Nepali-Translation-Instruction-Dataset
[ "region:us" ]
2023-10-28T07:02:31+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1712164438, "num_examples": 3560496}], "download_size": 775881227, "dataset_size": 1712164438}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-28T07:11:10+00:00
[]
[]
TAGS #region-us
## Dataset Card: Instruction-Based English-Nepali Translation Dataset ### Dataset Description This dataset consists of English-Nepali parallel sentences converted into an instruction-based format. Each entry prompts the model to translate a given sentence from English to Nepali or vice versa. ### Source Data Original Dataset: English-Nepali Parallel Sentences Paper: NepBERTa: Nepali Language Model Trained in a Large Corpus Authors: Milan Gautam, Sulav Timilsina, Binod Bhattarai Conference: Proceedings of the 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing (Volume 2: Short Papers) ### Dataset Format Each entry in the dataset has the following format: The dataset supports both English to Nepali and Nepali to English translations. ### Intended Use This dataset is designed for fine-tuning models on instruction-based translation tasks, especially suited for models like Llama Instruct. It can be used to develop models capable of translating between English and Nepali using instruction-based prompts. ### Data Collection The data was derived from the English-Nepali parallel corpus presented in the NepBERTa paper. The sentences were then converted into an instruction-based format to facilitate training with instruction-based models. ### Limitations - The dataset's performance and utility are tied to the quality of the original English-Nepali corpus. - The instruction-based format may introduce some redundancy and might not be ideal for all NLP tasks or models. ### Licensing Ensure you have the right to share the data and understand any licensing implications. Mention the dataset's licensing terms here. ---
[ "## Dataset Card: Instruction-Based English-Nepali Translation Dataset", "### Dataset Description\n\nThis dataset consists of English-Nepali parallel sentences converted into an instruction-based format. Each entry prompts the model to translate a given sentence from English to Nepali or vice versa.", "### Source Data\n\nOriginal Dataset: English-Nepali Parallel Sentences \nPaper: NepBERTa: Nepali Language Model Trained in a Large Corpus \nAuthors: Milan Gautam, Sulav Timilsina, Binod Bhattarai \nConference: Proceedings of the 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing (Volume 2: Short Papers)", "### Dataset Format\n\nEach entry in the dataset has the following format:\n\n\n\nThe dataset supports both English to Nepali and Nepali to English translations.", "### Intended Use\n\nThis dataset is designed for fine-tuning models on instruction-based translation tasks, especially suited for models like Llama Instruct. It can be used to develop models capable of translating between English and Nepali using instruction-based prompts.", "### Data Collection\n\nThe data was derived from the English-Nepali parallel corpus presented in the NepBERTa paper. The sentences were then converted into an instruction-based format to facilitate training with instruction-based models.", "### Limitations\n\n- The dataset's performance and utility are tied to the quality of the original English-Nepali corpus.\n- The instruction-based format may introduce some redundancy and might not be ideal for all NLP tasks or models.", "### Licensing\n\nEnsure you have the right to share the data and understand any licensing implications. Mention the dataset's licensing terms here.\n\n---" ]
[ "TAGS\n#region-us \n", "## Dataset Card: Instruction-Based English-Nepali Translation Dataset", "### Dataset Description\n\nThis dataset consists of English-Nepali parallel sentences converted into an instruction-based format. Each entry prompts the model to translate a given sentence from English to Nepali or vice versa.", "### Source Data\n\nOriginal Dataset: English-Nepali Parallel Sentences \nPaper: NepBERTa: Nepali Language Model Trained in a Large Corpus \nAuthors: Milan Gautam, Sulav Timilsina, Binod Bhattarai \nConference: Proceedings of the 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing (Volume 2: Short Papers)", "### Dataset Format\n\nEach entry in the dataset has the following format:\n\n\n\nThe dataset supports both English to Nepali and Nepali to English translations.", "### Intended Use\n\nThis dataset is designed for fine-tuning models on instruction-based translation tasks, especially suited for models like Llama Instruct. It can be used to develop models capable of translating between English and Nepali using instruction-based prompts.", "### Data Collection\n\nThe data was derived from the English-Nepali parallel corpus presented in the NepBERTa paper. The sentences were then converted into an instruction-based format to facilitate training with instruction-based models.", "### Limitations\n\n- The dataset's performance and utility are tied to the quality of the original English-Nepali corpus.\n- The instruction-based format may introduce some redundancy and might not be ideal for all NLP tasks or models.", "### Licensing\n\nEnsure you have the right to share the data and understand any licensing implications. Mention the dataset's licensing terms here.\n\n---" ]
[ 6, 17, 48, 99, 32, 61, 52, 55, 38 ]
[ "passage: TAGS\n#region-us \n## Dataset Card: Instruction-Based English-Nepali Translation Dataset### Dataset Description\n\nThis dataset consists of English-Nepali parallel sentences converted into an instruction-based format. Each entry prompts the model to translate a given sentence from English to Nepali or vice versa.### Source Data\n\nOriginal Dataset: English-Nepali Parallel Sentences \nPaper: NepBERTa: Nepali Language Model Trained in a Large Corpus \nAuthors: Milan Gautam, Sulav Timilsina, Binod Bhattarai \nConference: Proceedings of the 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing (Volume 2: Short Papers)### Dataset Format\n\nEach entry in the dataset has the following format:\n\n\n\nThe dataset supports both English to Nepali and Nepali to English translations.### Intended Use\n\nThis dataset is designed for fine-tuning models on instruction-based translation tasks, especially suited for models like Llama Instruct. It can be used to develop models capable of translating between English and Nepali using instruction-based prompts.### Data Collection\n\nThe data was derived from the English-Nepali parallel corpus presented in the NepBERTa paper. The sentences were then converted into an instruction-based format to facilitate training with instruction-based models.### Limitations\n\n- The dataset's performance and utility are tied to the quality of the original English-Nepali corpus.\n- The instruction-based format may introduce some redundancy and might not be ideal for all NLP tasks or models.### Licensing\n\nEnsure you have the right to share the data and understand any licensing implications. Mention the dataset's licensing terms here.\n\n---" ]
1a032c78dd9b22fa9a20058e7fba5b6d299c0772
## THIS DATASET IS ONLY MADE FOR THESE # ID name color # 1. ball yellow # 2. battery silver # 3. wood wood # 4. bowl white
gokul00060/armchat1
[ "license:mit", "region:us" ]
2023-10-28T07:02:33+00:00
{"license": "mit"}
2023-10-28T08:29:18+00:00
[]
[]
TAGS #license-mit #region-us
## THIS DATASET IS ONLY MADE FOR THESE # ID name color # 1. ball yellow # 2. battery silver # 3. wood wood # 4. bowl white
[ "## THIS DATASET IS ONLY MADE FOR THESE", "# ID name color", "# 1. ball yellow", "# 2. battery silver", "# 3. wood wood", "# 4. bowl white" ]
[ "TAGS\n#license-mit #region-us \n", "## THIS DATASET IS ONLY MADE FOR THESE", "# ID name color", "# 1. ball yellow", "# 2. battery silver", "# 3. wood wood", "# 4. bowl white" ]
[ 11, 14, 4, 4, 4, 4, 4 ]
[ "passage: TAGS\n#license-mit #region-us \n## THIS DATASET IS ONLY MADE FOR THESE# ID name color# 1. ball yellow# 2. battery silver# 3. wood wood# 4. bowl white" ]
c18cd4269a2139b80aa45a94b0e34ade50546a2b
# Dataset Card for "private_model_tts3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
quocanh34/private_model_tts3
[ "region:us" ]
2023-10-28T07:03:51+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "audio", "struct": [{"name": "array", "sequence": "float32"}, {"name": "path", "dtype": "string"}, {"name": "sampling_rate", "dtype": "int64"}]}, {"name": "pred_str", "dtype": "string"}, {"name": "pred_str_norm", "dtype": "string"}, {"name": "intent", "dtype": "string"}, {"name": "entities", "list": [{"name": "filler", "dtype": "string"}, {"name": "type", "dtype": "string"}]}, {"name": "file", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 568314768, "num_examples": 2139}], "download_size": 462243736, "dataset_size": 568314768}}
2023-10-28T07:05:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "private_model_tts3" More Information needed
[ "# Dataset Card for \"private_model_tts3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"private_model_tts3\"\n\nMore Information needed" ]
[ 6, 18 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"private_model_tts3\"\n\nMore Information needed" ]
d8854052dfd66d15b2a7f375a9b4a1c0dadc1eb7
# Dataset Card for "private_model_tts3_no_denoise" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
quocanh34/private_model_tts3_no_denoise
[ "region:us" ]
2023-10-28T07:36:14+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "audio", "struct": [{"name": "array", "sequence": "float32"}, {"name": "path", "dtype": "string"}, {"name": "sampling_rate", "dtype": "int64"}]}, {"name": "pred_str", "dtype": "string"}, {"name": "pred_str_norm", "dtype": "string"}, {"name": "intent", "dtype": "string"}, {"name": "entities", "list": [{"name": "filler", "dtype": "string"}, {"name": "type", "dtype": "string"}]}, {"name": "file", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 568313120, "num_examples": 2139}], "download_size": 462242144, "dataset_size": 568313120}}
2023-10-28T07:37:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "private_model_tts3_no_denoise" More Information needed
[ "# Dataset Card for \"private_model_tts3_no_denoise\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"private_model_tts3_no_denoise\"\n\nMore Information needed" ]
[ 6, 23 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"private_model_tts3_no_denoise\"\n\nMore Information needed" ]
9ce82dd42e8fbe2e205b46f734fdfc00293fb3cf
# Dataset Card for "cifar10_dataset_th_en" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
BossBossNJb/cifar10_dataset_th_en
[ "region:us" ]
2023-10-28T08:13:04+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "dataset_info": {"features": [{"name": "img", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "airplane", "1": "automobile", "2": "bird", "3": "cat", "4": "deer", "5": "dog", "6": "frog", "7": "horse", "8": "ship", "9": "truck"}}}}, {"name": "en", "dtype": "string"}, {"name": "th", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 115003310.0, "num_examples": 50000}, {"name": "test", "num_bytes": 23002580.0, "num_examples": 10000}], "download_size": 144125889, "dataset_size": 138005890.0}}
2023-10-28T08:13:12+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cifar10_dataset_th_en" More Information needed
[ "# Dataset Card for \"cifar10_dataset_th_en\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cifar10_dataset_th_en\"\n\nMore Information needed" ]
[ 6, 20 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"cifar10_dataset_th_en\"\n\nMore Information needed" ]
26a57f3cf24136aa8023dfeec4126bebd951f8b4
# Dataset Card for "pan-tadeusz" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
szymonrucinski/pan-tadeusz
[ "region:us" ]
2023-10-28T08:58:08+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 464733, "num_examples": 797}, {"name": "validation", "num_bytes": 16271, "num_examples": 25}], "download_size": 362939, "dataset_size": 481004}}
2023-10-28T08:59:42+00:00
[]
[]
TAGS #region-us
# Dataset Card for "pan-tadeusz" More Information needed
[ "# Dataset Card for \"pan-tadeusz\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"pan-tadeusz\"\n\nMore Information needed" ]
[ 6, 15 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"pan-tadeusz\"\n\nMore Information needed" ]
85a7a306854581cd3112527cd5f5c9d4ba2d06ed
# Guanaco-1k: Lazy Llama 2 Formatting This is a subset (1000 samples) of the excellent [`timdettmers/openassistant-guanaco`](https://huggingface.co/datasets/timdettmers/openassistant-guanaco) dataset, processed to match Llama 2's prompt format as described [in this article](https://huggingface.co/blog/llama2#how-to-prompt-llama-2). It was created using the following [colab notebook](https://colab.research.google.com/drive/1Ad7a9zMmkxuXTOh1Z7-rNSICA4dybpM2?usp=sharing). Useful if you don't want to reformat it by yourself (e.g., using a script). It was designed for [this article](https://mlabonne.github.io/blog/posts/Fine_Tune_Your_Own_Llama_2_Model_in_a_Colab_Notebook.html) about fine-tuning a Llama 2 (chat) model in a Google Colab.
M0hammed87/DictionaryTrain
[ "region:us" ]
2023-10-28T09:33:23+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1654448, "num_examples": 1000}], "download_size": 966693, "dataset_size": 1654448}, "configs": [{"config_name": "default"}]}
2023-10-31T01:19:58+00:00
[]
[]
TAGS #region-us
# Guanaco-1k: Lazy Llama 2 Formatting This is a subset (1000 samples) of the excellent 'timdettmers/openassistant-guanaco' dataset, processed to match Llama 2's prompt format as described in this article. It was created using the following colab notebook. Useful if you don't want to reformat it by yourself (e.g., using a script). It was designed for this article about fine-tuning a Llama 2 (chat) model in a Google Colab.
[ "# Guanaco-1k: Lazy Llama 2 Formatting\n\nThis is a subset (1000 samples) of the excellent 'timdettmers/openassistant-guanaco' dataset, processed to match Llama 2's prompt format as described in this article. It was created using the following colab notebook.\n\nUseful if you don't want to reformat it by yourself (e.g., using a script). It was designed for this article about fine-tuning a Llama 2 (chat) model in a Google Colab." ]
[ "TAGS\n#region-us \n", "# Guanaco-1k: Lazy Llama 2 Formatting\n\nThis is a subset (1000 samples) of the excellent 'timdettmers/openassistant-guanaco' dataset, processed to match Llama 2's prompt format as described in this article. It was created using the following colab notebook.\n\nUseful if you don't want to reformat it by yourself (e.g., using a script). It was designed for this article about fine-tuning a Llama 2 (chat) model in a Google Colab." ]
[ 6, 120 ]
[ "passage: TAGS\n#region-us \n# Guanaco-1k: Lazy Llama 2 Formatting\n\nThis is a subset (1000 samples) of the excellent 'timdettmers/openassistant-guanaco' dataset, processed to match Llama 2's prompt format as described in this article. It was created using the following colab notebook.\n\nUseful if you don't want to reformat it by yourself (e.g., using a script). It was designed for this article about fine-tuning a Llama 2 (chat) model in a Google Colab." ]
06f419064c82cdee32792253d8f81ad7d5a8a48f
# Dataset Card for Dataset Name <!-- Provide a quick summary of the dataset. --> This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
Harsh-7300/english_to_french
[ "task_categories:translation", "size_categories:1K<n<10K", "language:en", "language:fr", "license:mit", "legal", "region:us" ]
2023-10-28T09:44:49+00:00
{"language": ["en", "fr"], "license": "mit", "size_categories": ["1K<n<10K"], "task_categories": ["translation"], "pretty_name": "dataset3", "dataset_card": "H@rsh7300", "tags": ["legal"]}
2023-11-09T14:44:33+00:00
[]
[ "en", "fr" ]
TAGS #task_categories-translation #size_categories-1K<n<10K #language-English #language-French #license-mit #legal #region-us
# Dataset Card for Dataset Name This dataset card aims to be a base template for new datasets. It has been generated using this raw template. ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#task_categories-translation #size_categories-1K<n<10K #language-English #language-French #license-mit #legal #region-us \n", "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ 44, 34, 4, 40, 29, 3, 4, 9, 6, 5, 7, 4, 7, 10, 9, 5, 9, 8, 10, 46, 8, 7, 10, 5 ]
[ "passage: TAGS\n#task_categories-translation #size_categories-1K<n<10K #language-English #language-French #license-mit #legal #region-us \n# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.## Dataset Details### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:## Uses### Direct Use### Out-of-Scope Use## Dataset Structure## Dataset Creation### Curation Rationale### Source Data#### Data Collection and Processing#### Who are the source data producers?### Annotations [optional]#### Annotation process#### Who are the annotators?#### Personal and Sensitive Information## Bias, Risks, and Limitations### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:## Glossary [optional]## More Information [optional]## Dataset Card Authors [optional]## Dataset Card Contact" ]
d87f6c1967c112ff5213e216e81a4e0b5f48c907
# AutoTrain Dataset for project: test ## Dataset Description This dataset has been automatically processed by AutoTrain for project test. ### Languages The BCP-47 code for the dataset's language is en. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "feat_id": "13829542", "text": "Kasia: When are u coming back?\r\nMatt: Back where?\r\nKasia: Oh come on\r\nKasia: you know what i mean\r\nMatt: I really don't \r\nKasia: When are you coming back to Warsaw\r\nMatt: I have no idea\r\nMatt: maybe around easter\r\nKasia: will you let me know\r\nMatt: sure if I know something then I will let you know asap\r\nKasia: ok \r\nMatt: are you mad?\r\nKasia: a bit\r\nMatt: oh come on\r\nMatt: this is not my fault \r\nMatt: there is no way that I can answer that question\r\nMatt: not now\r\nKasia: Fine", "target": "Matt doesn't know when he's coming back to Warsaw. He might come around Easter. When he knows more, he will let Kasia know. Kasia is a bit upset." }, { "feat_id": "13862523", "text": "Oliver: Have you beaten the game yet?\nTom: Not yet\nOliver: Ok... what mission are you playing?\nTom: The one before the final one, it's pretty hard\nOliver: I didn't find it particularly hard\nTom: I mean, combat is easy at this point in the game but the puzzles are difficult\nOliver: Ok, I got it\nTom: It's fun how most horror action games let you get really powerful by the end of the story\nOliver: Well, you know, being a pussy from start to finish ain't my idea of fun even in a horror game\nTom: I know... but do you remember Bioshock? You turned into some sort of superhero and the final boss was pretty much a joke\nOliver: Don't you dare talk like that about my favorite game\nTom: I know, I love it too, but it had its flaws\nOliver: No it didn't XD\nTom: Lol\nOliver: Well, keep playing\nTom: I'll let you know when I finish this one and my overall impressions\nOliver: Ok", "target": "Tom will contact Oliver after finishing new horror action game." } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "feat_id": "Value(dtype='string', id=None)", "text": "Value(dtype='string', id=None)", "target": "Value(dtype='string', id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow: | Split name | Num samples | | ------------ | ------------------- | | train | 11785 | | valid | 2947 |
j23349/autotrain-data-test
[ "task_categories:summarization", "language:en", "region:us" ]
2023-10-28T10:07:09+00:00
{"language": ["en"], "task_categories": ["summarization"]}
2023-10-28T10:09:51+00:00
[]
[ "en" ]
TAGS #task_categories-summarization #language-English #region-us
AutoTrain Dataset for project: test =================================== Dataset Description ------------------- This dataset has been automatically processed by AutoTrain for project test. ### Languages The BCP-47 code for the dataset's language is en. Dataset Structure ----------------- ### Data Instances A sample from this dataset looks as follows: ### Dataset Fields The dataset has the following fields (also called "features"): ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow:
[ "### Languages\n\n\nThe BCP-47 code for the dataset's language is en.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
[ "TAGS\n#task_categories-summarization #language-English #region-us \n", "### Languages\n\n\nThe BCP-47 code for the dataset's language is en.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
[ 20, 26, 17, 23, 27 ]
[ "passage: TAGS\n#task_categories-summarization #language-English #region-us \n### Languages\n\n\nThe BCP-47 code for the dataset's language is en.\n\n\nDataset Structure\n-----------------### Data Instances\n\n\nA sample from this dataset looks as follows:### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
1e5d2cde3a902ecf8f56eb3f5997a9de8bc93664
The modded version of https://huggingface.co/datasets/app_reviews ## Dataset Card for app-reviews-modded ### Dataset Summary It is a large dataset of Android applications belonging to 23 different app categories, which provides an overview of the types of feedback users report on the apps and documents the evolution of the related code metrics. The dataset contains about 395 applications of the F-Droid repository, including around 600 versions, 280,000 user reviews (extracted with specific text mining approaches) ### Supported Tasks and Leaderboards The dataset we provide comprises 395 different apps from the F-Droid repository, including code quality indicators of 629 versions of these apps. It also encloses app reviews related to each of these versions, which have been automatically categorized classifying types of user feedback from a software maintenance and evolution perspective. **Languages** The dataset is a monolingual dataset that has the messages in English. **The star field is termed as class label** ### Data Fields * package_name: Name of the Software Application Package * review: Message of the user * date: the date when the user posted the review * star: rating provided by the user for the application * products: Name of the product/App ### Data Splits. - Training data: 259258 - Testing data: 28807
Sharathhebbar24/app_reviews_modded
[ "task_categories:text-classification", "task_categories:text-generation", "task_categories:text2text-generation", "task_categories:question-answering", "size_categories:100K<n<1M", "language:en", "license:mit", "app_reviews", "region:us" ]
2023-10-28T10:22:02+00:00
{"language": ["en"], "license": "mit", "size_categories": ["100K<n<1M"], "task_categories": ["text-classification", "text-generation", "text2text-generation", "question-answering"], "pretty_name": "app_reviews", "dataset_info": {"features": [{"name": "package_name", "dtype": "string"}, {"name": "review", "dtype": "string"}, {"name": "date", "dtype": "string"}, {"name": "star", "dtype": {"class_label": {"names": {"0": "1", "1": "2", "2": "3", "3": "4", "4": "5"}}}}, {"name": "products", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 34076493.880728304, "num_examples": 259258}, {"name": "test", "num_bytes": 3786350.1192716924, "num_examples": 28807}], "download_size": 16769656, "dataset_size": 37862844.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}], "tags": ["app_reviews"]}
2023-10-28T10:35:57+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #task_categories-text-generation #task_categories-text2text-generation #task_categories-question-answering #size_categories-100K<n<1M #language-English #license-mit #app_reviews #region-us
The modded version of URL ## Dataset Card for app-reviews-modded ### Dataset Summary It is a large dataset of Android applications belonging to 23 different app categories, which provides an overview of the types of feedback users report on the apps and documents the evolution of the related code metrics. The dataset contains about 395 applications of the F-Droid repository, including around 600 versions, 280,000 user reviews (extracted with specific text mining approaches) ### Supported Tasks and Leaderboards The dataset we provide comprises 395 different apps from the F-Droid repository, including code quality indicators of 629 versions of these apps. It also encloses app reviews related to each of these versions, which have been automatically categorized classifying types of user feedback from a software maintenance and evolution perspective. Languages The dataset is a monolingual dataset that has the messages in English. The star field is termed as class label ### Data Fields * package_name: Name of the Software Application Package * review: Message of the user * date: the date when the user posted the review * star: rating provided by the user for the application * products: Name of the product/App ### Data Splits. - Training data: 259258 - Testing data: 28807
[ "## Dataset Card for app-reviews-modded", "### Dataset Summary\nIt is a large dataset of Android applications belonging to 23 different app categories, which provides an overview of the types of feedback users report on the apps and documents the evolution of the related code metrics. The dataset contains about 395 applications of the F-Droid repository, including around 600 versions, 280,000 user reviews (extracted with specific text mining approaches)", "### Supported Tasks and Leaderboards\nThe dataset we provide comprises 395 different apps from the F-Droid repository, including code quality indicators of 629 versions of these apps. It also encloses app reviews related to each of these versions, which have been automatically categorized classifying types of user feedback from a software maintenance and evolution perspective.\n\nLanguages\nThe dataset is a monolingual dataset that has the messages in English.\n\nThe star field is termed as class label", "### Data Fields\n\n* package_name: Name of the Software Application Package\n* review: Message of the user\n* date: the date when the user posted the review\n* star: rating provided by the user for the application\n* products: Name of the product/App", "### Data Splits.\n- Training data: 259258\n- Testing data: 28807" ]
[ "TAGS\n#task_categories-text-classification #task_categories-text-generation #task_categories-text2text-generation #task_categories-question-answering #size_categories-100K<n<1M #language-English #license-mit #app_reviews #region-us \n", "## Dataset Card for app-reviews-modded", "### Dataset Summary\nIt is a large dataset of Android applications belonging to 23 different app categories, which provides an overview of the types of feedback users report on the apps and documents the evolution of the related code metrics. The dataset contains about 395 applications of the F-Droid repository, including around 600 versions, 280,000 user reviews (extracted with specific text mining approaches)", "### Supported Tasks and Leaderboards\nThe dataset we provide comprises 395 different apps from the F-Droid repository, including code quality indicators of 629 versions of these apps. It also encloses app reviews related to each of these versions, which have been automatically categorized classifying types of user feedback from a software maintenance and evolution perspective.\n\nLanguages\nThe dataset is a monolingual dataset that has the messages in English.\n\nThe star field is termed as class label", "### Data Fields\n\n* package_name: Name of the Software Application Package\n* review: Message of the user\n* date: the date when the user posted the review\n* star: rating provided by the user for the application\n* products: Name of the product/App", "### Data Splits.\n- Training data: 259258\n- Testing data: 28807" ]
[ 79, 12, 91, 109, 56, 21 ]
[ "passage: TAGS\n#task_categories-text-classification #task_categories-text-generation #task_categories-text2text-generation #task_categories-question-answering #size_categories-100K<n<1M #language-English #license-mit #app_reviews #region-us \n## Dataset Card for app-reviews-modded### Dataset Summary\nIt is a large dataset of Android applications belonging to 23 different app categories, which provides an overview of the types of feedback users report on the apps and documents the evolution of the related code metrics. The dataset contains about 395 applications of the F-Droid repository, including around 600 versions, 280,000 user reviews (extracted with specific text mining approaches)### Supported Tasks and Leaderboards\nThe dataset we provide comprises 395 different apps from the F-Droid repository, including code quality indicators of 629 versions of these apps. It also encloses app reviews related to each of these versions, which have been automatically categorized classifying types of user feedback from a software maintenance and evolution perspective.\n\nLanguages\nThe dataset is a monolingual dataset that has the messages in English.\n\nThe star field is termed as class label### Data Fields\n\n* package_name: Name of the Software Application Package\n* review: Message of the user\n* date: the date when the user posted the review\n* star: rating provided by the user for the application\n* products: Name of the product/App### Data Splits.\n- Training data: 259258\n- Testing data: 28807" ]
32a3d6864d277f6b92b34f310e03f43c6466bb1e
# Dataset Card for "100_image" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
22Plaruno/100_image
[ "region:us" ]
2023-10-28T10:36:08+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 10359951.0, "num_examples": 100}], "download_size": 0, "dataset_size": 10359951.0}}
2023-10-28T10:40:25+00:00
[]
[]
TAGS #region-us
# Dataset Card for "100_image" More Information needed
[ "# Dataset Card for \"100_image\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"100_image\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"100_image\"\n\nMore Information needed" ]
b50cb9ade72dcaacec9834fee73457bbfac0c586
# Data Kamus Daerah Indonesia ## Deskripsi Database ini berisi kumpulan kata-kata dari berbagai bahasa di Indonesia, termasuk kamus untuk menerjemahkan ke bahasa-bahasa regional seperti Lampung dan Jawa. Berikut adalah sumber data dan detail lebih lanjut tentang setiap kamus: 1. **Kumpulan Kata Indonesia** - [Sumber Data](https://github.com/Wikidepia/indonesian_datasets/tree/master/dictionary/wordlist/data) - Berisi kumpulan kata bahasa indonesia cat indonesia.txt 2. **Lampung-Indonesia** - [Sumber Data](https://github.com/meizano/lampung/blob/master/db/parser/Indonesia-Lampung.csv) - Berisi kumpulan kata bahasa lampung dan terjemahan indonesianya 3. **Jawa-Indonesia** - [Sumber Data](https://github.com/milstrike/Kamus-Jawa-Indonesia/blob/master/MyApplication/app/src/main/assets/databases/kamusjawaindonesia.sqlite) - Berisi kumpulan kata bahasa jawa dan terjemahan indonesianya ## Catatan Penting - Bahasa saya kumpulkan seiring waktu, jika berminat silahkan tambahkan bahasa daerah lain ke sini, kata pada bahasa daerah yang di inputkan mungkin belum lengkap. ---
mabzak/kamus-daerah-indo
[ "language:id", "region:us" ]
2023-10-28T10:36:09+00:00
{"language": ["id"]}
2023-10-30T13:04:12+00:00
[]
[ "id" ]
TAGS #language-Indonesian #region-us
# Data Kamus Daerah Indonesia ## Deskripsi Database ini berisi kumpulan kata-kata dari berbagai bahasa di Indonesia, termasuk kamus untuk menerjemahkan ke bahasa-bahasa regional seperti Lampung dan Jawa. Berikut adalah sumber data dan detail lebih lanjut tentang setiap kamus: 1. Kumpulan Kata Indonesia - Sumber Data - Berisi kumpulan kata bahasa indonesia cat URL 2. Lampung-Indonesia - Sumber Data - Berisi kumpulan kata bahasa lampung dan terjemahan indonesianya 3. Jawa-Indonesia - Sumber Data - Berisi kumpulan kata bahasa jawa dan terjemahan indonesianya ## Catatan Penting - Bahasa saya kumpulkan seiring waktu, jika berminat silahkan tambahkan bahasa daerah lain ke sini, kata pada bahasa daerah yang di inputkan mungkin belum lengkap. ---
[ "# Data Kamus Daerah Indonesia", "## Deskripsi\nDatabase ini berisi kumpulan kata-kata dari berbagai bahasa di Indonesia, termasuk kamus untuk menerjemahkan ke bahasa-bahasa regional seperti Lampung dan Jawa. Berikut adalah sumber data dan detail lebih lanjut tentang setiap kamus:\n\n1. Kumpulan Kata Indonesia\n - Sumber Data\n - Berisi kumpulan kata bahasa indonesia\ncat URL\n\n2. Lampung-Indonesia\n - Sumber Data\n - Berisi kumpulan kata bahasa lampung dan terjemahan indonesianya\n\n3. Jawa-Indonesia\n - Sumber Data\n - Berisi kumpulan kata bahasa jawa dan terjemahan indonesianya", "## Catatan Penting\n- Bahasa saya kumpulkan seiring waktu, jika berminat silahkan tambahkan bahasa daerah lain ke sini, kata pada bahasa daerah yang di inputkan mungkin belum lengkap.\n\n---" ]
[ "TAGS\n#language-Indonesian #region-us \n", "# Data Kamus Daerah Indonesia", "## Deskripsi\nDatabase ini berisi kumpulan kata-kata dari berbagai bahasa di Indonesia, termasuk kamus untuk menerjemahkan ke bahasa-bahasa regional seperti Lampung dan Jawa. Berikut adalah sumber data dan detail lebih lanjut tentang setiap kamus:\n\n1. Kumpulan Kata Indonesia\n - Sumber Data\n - Berisi kumpulan kata bahasa indonesia\ncat URL\n\n2. Lampung-Indonesia\n - Sumber Data\n - Berisi kumpulan kata bahasa lampung dan terjemahan indonesianya\n\n3. Jawa-Indonesia\n - Sumber Data\n - Berisi kumpulan kata bahasa jawa dan terjemahan indonesianya", "## Catatan Penting\n- Bahasa saya kumpulkan seiring waktu, jika berminat silahkan tambahkan bahasa daerah lain ke sini, kata pada bahasa daerah yang di inputkan mungkin belum lengkap.\n\n---" ]
[ 11, 6, 100, 36 ]
[ "passage: TAGS\n#language-Indonesian #region-us \n# Data Kamus Daerah Indonesia## Deskripsi\nDatabase ini berisi kumpulan kata-kata dari berbagai bahasa di Indonesia, termasuk kamus untuk menerjemahkan ke bahasa-bahasa regional seperti Lampung dan Jawa. Berikut adalah sumber data dan detail lebih lanjut tentang setiap kamus:\n\n1. Kumpulan Kata Indonesia\n - Sumber Data\n - Berisi kumpulan kata bahasa indonesia\ncat URL\n\n2. Lampung-Indonesia\n - Sumber Data\n - Berisi kumpulan kata bahasa lampung dan terjemahan indonesianya\n\n3. Jawa-Indonesia\n - Sumber Data\n - Berisi kumpulan kata bahasa jawa dan terjemahan indonesianya## Catatan Penting\n- Bahasa saya kumpulkan seiring waktu, jika berminat silahkan tambahkan bahasa daerah lain ke sini, kata pada bahasa daerah yang di inputkan mungkin belum lengkap.\n\n---" ]
c5ea81cca268f23f2ad67be31209db28193d703a
# Dataset Card for "image" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
22Plaruno/image
[ "region:us" ]
2023-10-28T10:41:08+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 10359951.0, "num_examples": 100}], "download_size": 0, "dataset_size": 10359951.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-28T10:48:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "image" More Information needed
[ "# Dataset Card for \"image\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"image\"\n\nMore Information needed" ]
[ 6, 11 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"image\"\n\nMore Information needed" ]
839180c060cb533dc15633f5541364d6b631fa7b
# Dataset Card for "face_ds" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
22Plaruno/face_ds
[ "region:us" ]
2023-10-28T10:53:59+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 10359951.0, "num_examples": 100}], "download_size": 10362224, "dataset_size": 10359951.0}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-28T10:54:02+00:00
[]
[]
TAGS #region-us
# Dataset Card for "face_ds" More Information needed
[ "# Dataset Card for \"face_ds\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"face_ds\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"face_ds\"\n\nMore Information needed" ]
7c8ad89a0ff98f130d703a5ada491ca96977795f
# Table of Contents 1. [Description](#description) 2. [Usage](#usage) 3. [Distribution](#distribution) - [Chapters Dataset](#chapters-dataset) - [Books Dataset](#books-dataset) 4. [Structure](#structure) 5. [Results and Comparison with kmfoda/booksum](#results-and-comparison-with-kmfodabooksum) # Description: This repository contains the Booksum dataset introduced in the paper [BookSum: A Collection of Datasets for Long-form Narrative Summarization ](https://arxiv.org/abs/2105.08209). This dataset includes both book and chapter summaries from the BookSum dataset (unlike the kmfoda/booksum one which only contains the chapter dataset). Some mismatched summaries have been corrected. Uneccessary columns has been discarded. Contains minimal text-to-summary rows. As there are multiple summaries for a given text, each row contains an array of summaries. # Usage Note: Make sure you have [>2.14.0 version of "datasets" library](https://github.com/huggingface/datasets/releases/tag/2.14.0) installed to load the dataset successfully. ``` from datasets import load_dataset book_data = load_dataset("ubaada/booksum-complete-cleaned", "books") chapter_data = load_dataset("ubaada/booksum-complete-cleaned", "chapters") # Print the 1st book print(book_data["train"][0]['text']) # Print the summary of the 1st book print(book_data["train"][0]['summary'][0]['text']) ``` # Distribution <div style="display: inline-block; vertical-align: top; width: 45%;"> ## Chapters Dataset | Split | Total Sum. | Missing Sum. | Successfully Processed | Chapters | |---------|------------|--------------|------------------------|------| | Train | 9712 | 178 | 9534 (98.17%) | 5653 | | Test | 1432 | 0 | 1432 (100.0%) | 950 | | Val | 1485 | 0 | 1485 (100.0%) | 854 | </div> <div style="display: inline-block; vertical-align: top; width: 45%; margin-left: 5%;"> ## Books Dataset | Split | Total Sum. | Missing Sum. | Successfully Processed | Books | |---------|------------|--------------|------------------------|------| | Train | 314 | 0 | 314 (100.0%) | 151 | | Test | 46 | 0 | 46 (100.0%) | 17 | | Val | 45 | 0 | 45 (100.0%) | 19 | </div> # Structure: ``` Chapters Dataset 0 - bid (book id) 1 - book_title 2 - chapter_id 3 - text (raw chapter text) 4 - summary (list of summaries from different sources) - {source, text (summary), analysis} ... 5 - is_aggregate (bool) (if true, then the text contains more than one chapter) Books Dataset: 0 - bid (book id) 1 - title 2 - text (raw text) 4 - summary (list of summaries from different sources) - {source, text (summary), analysis} ... ``` # Reults and Comparison with kmfoda/booksum Tested on the 'test' split of chapter sub-dataset. There are slight improvement on R1/R2 scores compared to another BookSum repo likely due to the work done on cleaning the misalignments in the alignment file. In the plot for this dataset, first summary \[0\] is chosen for each chapter. If best reference summary is chosen from the list for each chapter, theere are further improvements but are not shown here for fairness. ![image/png](https://cdn-uploads.huggingface.co/production/uploads/62a7d1e152aa8695f9209345/lUNes4SFXVMdtebGMEJK0.png)
ubaada/booksum-complete-cleaned
[ "task_categories:summarization", "task_categories:text-generation", "size_categories:1K<n<10K", "language:en", "arxiv:2105.08209", "region:us" ]
2023-10-28T11:13:12+00:00
{"language": ["en"], "size_categories": ["1K<n<10K"], "task_categories": ["summarization", "text-generation"], "pretty_name": "BookSum Summarization Dataset Clean", "configs": [{"config_name": "books", "data_files": [{"split": "train", "path": "books/train.jsonl"}, {"split": "test", "path": "books/test.jsonl"}, {"split": "validation", "path": "books/val.jsonl"}]}, {"config_name": "chapters", "data_files": [{"split": "train", "path": "chapters/train.jsonl"}, {"split": "test", "path": "chapters/test.jsonl"}, {"split": "validation", "path": "chapters/val.jsonl"}]}]}
2023-11-02T09:58:39+00:00
[ "2105.08209" ]
[ "en" ]
TAGS #task_categories-summarization #task_categories-text-generation #size_categories-1K<n<10K #language-English #arxiv-2105.08209 #region-us
Table of Contents ================= 1. Description 2. Usage 3. Distribution * Chapters Dataset * Books Dataset 4. Structure 5. Results and Comparison with kmfoda/booksum Description: ============ This repository contains the Booksum dataset introduced in the paper BookSum: A Collection of Datasets for Long-form Narrative Summarization . This dataset includes both book and chapter summaries from the BookSum dataset (unlike the kmfoda/booksum one which only contains the chapter dataset). Some mismatched summaries have been corrected. Uneccessary columns has been discarded. Contains minimal text-to-summary rows. As there are multiple summaries for a given text, each row contains an array of summaries. Usage ===== Note: Make sure you have >2.14.0 version of "datasets" library installed to load the dataset successfully. Distribution ============ Chapters Dataset ---------------- Books Dataset ------------- Structure: ========== Reults and Comparison with kmfoda/booksum ========================================= Tested on the 'test' split of chapter sub-dataset. There are slight improvement on R1/R2 scores compared to another BookSum repo likely due to the work done on cleaning the misalignments in the alignment file. In the plot for this dataset, first summary [0] is chosen for each chapter. If best reference summary is chosen from the list for each chapter, theere are further improvements but are not shown here for fairness. !image/png
[]
[ "TAGS\n#task_categories-summarization #task_categories-text-generation #size_categories-1K<n<10K #language-English #arxiv-2105.08209 #region-us \n" ]
[ 52 ]
[ "passage: TAGS\n#task_categories-summarization #task_categories-text-generation #size_categories-1K<n<10K #language-English #arxiv-2105.08209 #region-us \n" ]
0f2d9e1c5edd13304a2e03fcb8482ccf605ae932
# SAFE Sequential Attachment-based Fragment Embedding (SAFE) is a novel molecular line notation that represents molecules as an unordered sequence of fragment blocks to improve molecule design using generative models. This is the drugs dataset used for benchmarking. Find the details and how to use at SAFE in the repo https://github.com/datamol-io/safe or the paper https://arxiv.org/pdf/2310.10773.pdf.
datamol-io/safe-drugs
[ "license:cc-by-4.0", "arxiv:2310.10773", "region:us" ]
2023-10-28T11:18:50+00:00
{"license": "cc-by-4.0", "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}], "dataset_info": {"features": [{"name": "dosed_ingredient", "dtype": "bool"}, {"name": "indication_class", "dtype": "string"}, {"name": "molecule_chembl_id", "dtype": "string"}, {"name": "molecule_type", "dtype": "string"}, {"name": "oral", "dtype": "bool"}, {"name": "pref_name", "dtype": "string"}, {"name": "therapeutic_flag", "dtype": "bool"}, {"name": "usan_stem", "dtype": "string"}, {"name": "usan_stem_definition", "dtype": "string"}, {"name": "usan_year", "dtype": "float64"}, {"name": "withdrawn_flag", "dtype": "bool"}, {"name": "smiles", "dtype": "string"}, {"name": "inchikey", "dtype": "string"}, {"name": "slices", "dtype": "string"}, {"name": "morphing", "dtype": "string"}, {"name": "motif", "dtype": "string"}, {"name": "scaffold", "dtype": "string"}, {"name": "superstructure", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 12691, "num_examples": 26}], "download_size": 18556, "dataset_size": 12691}}
2023-10-28T11:23:11+00:00
[ "2310.10773" ]
[]
TAGS #license-cc-by-4.0 #arxiv-2310.10773 #region-us
# SAFE Sequential Attachment-based Fragment Embedding (SAFE) is a novel molecular line notation that represents molecules as an unordered sequence of fragment blocks to improve molecule design using generative models. This is the drugs dataset used for benchmarking. Find the details and how to use at SAFE in the repo URL or the paper URL
[ "# SAFE\n\nSequential Attachment-based Fragment Embedding (SAFE) is a novel molecular line notation that represents molecules as an unordered sequence of fragment blocks to improve molecule design using generative models.\n\nThis is the drugs dataset used for benchmarking.\n\nFind the details and how to use at SAFE in the repo URL or the paper URL" ]
[ "TAGS\n#license-cc-by-4.0 #arxiv-2310.10773 #region-us \n", "# SAFE\n\nSequential Attachment-based Fragment Embedding (SAFE) is a novel molecular line notation that represents molecules as an unordered sequence of fragment blocks to improve molecule design using generative models.\n\nThis is the drugs dataset used for benchmarking.\n\nFind the details and how to use at SAFE in the repo URL or the paper URL" ]
[ 24, 82 ]
[ "passage: TAGS\n#license-cc-by-4.0 #arxiv-2310.10773 #region-us \n# SAFE\n\nSequential Attachment-based Fragment Embedding (SAFE) is a novel molecular line notation that represents molecules as an unordered sequence of fragment blocks to improve molecule design using generative models.\n\nThis is the drugs dataset used for benchmarking.\n\nFind the details and how to use at SAFE in the repo URL or the paper URL" ]
b385b322970eef433de8fbd129a34b707291d7af
# Dataset Card for "tts_face" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
linhtran92/tts_face
[ "region:us" ]
2023-10-28T11:25:03+00:00
{"dataset_info": {"features": [{"name": "sentence_norm", "dtype": "string"}, {"name": "audio", "struct": [{"name": "array", "sequence": "float64"}, {"name": "path", "dtype": "null"}, {"name": "sampling_rate", "dtype": "int64"}]}, {"name": "id", "dtype": "string"}, {"name": "wer", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 84090967, "num_examples": 225}], "download_size": 64738623, "dataset_size": 84090967}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-28T13:51:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "tts_face" More Information needed
[ "# Dataset Card for \"tts_face\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"tts_face\"\n\nMore Information needed" ]
[ 6, 14 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"tts_face\"\n\nMore Information needed" ]
d6470a880bccb9b90e8d251bec8abdec02d5ae7e
# Projeto Banco-Imagem ### Nome do aluno Enio Kilder Oliveira da Silva |**Tipo de Projeto**|**Modelo Selecionado**|**Linguagem**| |--|--|--| Classificação de Objetos |YOLOv5|PyTorch| ## Performance O modelo treinado possui performance de **98.6%**. ### Output do bloco de treinamento <details> <summary>Expandir Conteúdo!</summary> ```text %%time %cd ../yolov5 !python classify/train.py --model yolov5n-cls.pt --data $DATASET_NAME --epochs 128 --batch 16 --img 320 --pretrained weights/yolov5n-cls.pt /content/yolov5 2023-10-28 01:49:35.242300: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered 2023-10-28 01:49:35.242363: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered 2023-10-28 01:49:35.242406: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered classify/train: model=yolov5n-cls.pt, data=Banco-Imagem-1, epochs=128, batch_size=16, imgsz=320, nosave=False, cache=None, device=, workers=8, project=runs/train-cls, name=exp, exist_ok=False, pretrained=weights/yolov5n-cls.pt, optimizer=Adam, lr0=0.001, decay=5e-05, label_smoothing=0.1, cutoff=None, dropout=None, verbose=False, seed=0, local_rank=-1 github: up to date with https://github.com/ultralytics/yolov5 ✅ YOLOv5 🚀 v7.0-230-g53efd07 Python-3.10.12 torch-2.1.0+cu118 CUDA:0 (Tesla T4, 15102MiB) TensorBoard: Start with 'tensorboard --logdir runs/train-cls', view at http://localhost:6006/ albumentations: RandomResizedCrop(p=1.0, height=320, width=320, scale=(0.08, 1.0), ratio=(0.75, 1.3333333333333333), interpolation=1), HorizontalFlip(p=0.5), ColorJitter(p=0.5, brightness=[0.6, 1.4], contrast=[0.6, 1.4], saturation=[0.6, 1.4], hue=[0, 0]), Normalize(p=1.0, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0), ToTensorV2(always_apply=True, p=1.0, transpose_mask=False) Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-cls.pt to yolov5n-cls.pt... 100% 4.87M/4.87M [00:00<00:00, 48.4MB/s] Model summary: 149 layers, 1218405 parameters, 1218405 gradients, 3.0 GFLOPs optimizer: Adam(lr=0.001) with parameter groups 32 weight(decay=0.0), 33 weight(decay=5e-05), 33 bias Image sizes 320 train, 320 test Using 1 dataloader workers Logging results to runs/train-cls/exp Starting yolov5n-cls.pt training on Banco-Imagem-1 dataset with 5 classes for 128 epochs... Epoch GPU_mem train_loss test_loss top1_acc top5_acc 1/128 0.508G 1.55 1.51 0.194 1: 100% 16/16 [00:06<00:00, 2.59it/s] 2/128 0.508G 1.39 1.86 0.222 1: 100% 16/16 [00:02<00:00, 6.81it/s] 3/128 0.508G 1.4 2.07 0.194 1: 100% 16/16 [00:02<00:00, 7.04it/s] 4/128 0.508G 1.35 1.75 0.222 1: 100% 16/16 [00:02<00:00, 6.38it/s] 5/128 0.508G 1.34 2.17 0.222 1: 100% 16/16 [00:02<00:00, 5.51it/s] 6/128 0.508G 1.26 1.76 0.25 1: 100% 16/16 [00:04<00:00, 3.51it/s] 7/128 0.508G 1.32 1.3 0.306 1: 100% 16/16 [00:02<00:00, 6.76it/s] 8/128 0.508G 1.27 1.57 0.333 1: 100% 16/16 [00:02<00:00, 6.99it/s] 9/128 0.508G 1.38 1.5 0.306 1: 100% 16/16 [00:02<00:00, 6.51it/s] 10/128 0.508G 1.3 1.39 0.278 1: 100% 16/16 [00:02<00:00, 5.73it/s] 11/128 0.508G 1.3 1.55 0.361 1: 100% 16/16 [00:03<00:00, 4.95it/s] 12/128 0.508G 1.28 1.45 0.306 1: 100% 16/16 [00:02<00:00, 6.98it/s] 13/128 0.508G 1.28 1.33 0.528 1: 100% 16/16 [00:02<00:00, 6.34it/s] 14/128 0.508G 1.24 1.19 0.417 1: 100% 16/16 [00:02<00:00, 6.90it/s] 15/128 0.508G 1.27 1.81 0.222 1: 100% 16/16 [00:03<00:00, 4.79it/s] 16/128 0.508G 1.25 1.52 0.361 1: 100% 16/16 [00:02<00:00, 6.45it/s] 17/128 0.508G 1.28 1.2 0.361 1: 100% 16/16 [00:02<00:00, 6.15it/s] 18/128 0.508G 1.25 1.33 0.528 1: 100% 16/16 [00:02<00:00, 6.79it/s] 19/128 0.508G 1.18 1.17 0.5 1: 100% 16/16 [00:02<00:00, 6.67it/s] 20/128 0.508G 1.23 1.33 0.306 1: 100% 16/16 [00:04<00:00, 3.52it/s] 21/128 0.508G 1.21 1.39 0.417 1: 100% 16/16 [00:02<00:00, 6.89it/s] 22/128 0.508G 1.18 1.36 0.528 1: 100% 16/16 [00:02<00:00, 6.43it/s] 23/128 0.508G 1.14 1.38 0.5 1: 100% 16/16 [00:02<00:00, 6.70it/s] 24/128 0.508G 1.17 1.3 0.556 1: 100% 16/16 [00:03<00:00, 4.59it/s] 25/128 0.508G 1.2 1.13 0.583 1: 100% 16/16 [00:02<00:00, 6.26it/s] 26/128 0.508G 1.11 1.12 0.528 1: 100% 16/16 [00:02<00:00, 6.69it/s] 27/128 0.508G 1.12 1.06 0.583 1: 100% 16/16 [00:02<00:00, 6.37it/s] 28/128 0.508G 1.12 1.45 0.417 1: 100% 16/16 [00:02<00:00, 6.95it/s] 29/128 0.508G 1.19 1.11 0.5 1: 100% 16/16 [00:03<00:00, 4.33it/s] 30/128 0.508G 1.14 1.2 0.583 1: 100% 16/16 [00:02<00:00, 6.86it/s] 31/128 0.508G 1.1 1.34 0.5 1: 100% 16/16 [00:02<00:00, 5.83it/s] 32/128 0.508G 1.17 2.32 0.278 1: 100% 16/16 [00:02<00:00, 6.40it/s] 33/128 0.508G 1.11 1.02 0.667 1: 100% 16/16 [00:02<00:00, 5.47it/s] 34/128 0.508G 1.16 1.37 0.5 1: 100% 16/16 [00:03<00:00, 5.17it/s] 35/128 0.508G 1.1 1.12 0.472 1: 100% 16/16 [00:02<00:00, 6.79it/s] 36/128 0.508G 1.08 1.2 0.556 1: 100% 16/16 [00:03<00:00, 4.22it/s] 37/128 0.508G 1.11 1.08 0.556 1: 100% 16/16 [00:02<00:00, 6.21it/s] 38/128 0.508G 1.13 1.26 0.528 1: 100% 16/16 [00:03<00:00, 4.65it/s] 39/128 0.508G 1.12 1.11 0.667 1: 100% 16/16 [00:02<00:00, 6.73it/s] 40/128 0.508G 1.11 1.19 0.639 1: 100% 16/16 [00:02<00:00, 6.53it/s] 41/128 0.508G 1.07 0.947 0.556 1: 100% 16/16 [00:02<00:00, 6.87it/s] 42/128 0.508G 1.07 1.18 0.611 1: 100% 16/16 [00:03<00:00, 5.17it/s] 43/128 0.508G 1.14 1.44 0.528 1: 100% 16/16 [00:02<00:00, 5.41it/s] 44/128 0.508G 1.05 1.01 0.667 1: 100% 16/16 [00:02<00:00, 6.64it/s] 45/128 0.508G 1.08 1.14 0.639 1: 100% 16/16 [00:02<00:00, 6.77it/s] 46/128 0.508G 1.07 1.33 0.528 1: 100% 16/16 [00:02<00:00, 6.31it/s] 47/128 0.508G 1.03 1 0.639 1: 100% 16/16 [00:03<00:00, 4.78it/s] 48/128 0.508G 1.04 1.71 0.611 1: 100% 16/16 [00:02<00:00, 5.78it/s] 49/128 0.508G 1.04 1.64 0.528 1: 100% 16/16 [00:02<00:00, 6.66it/s] 50/128 0.508G 1.02 1 0.75 1: 100% 16/16 [00:02<00:00, 6.63it/s] 51/128 0.508G 1.02 1.11 0.667 1: 100% 16/16 [00:02<00:00, 6.63it/s] 52/128 0.508G 1.06 1.59 0.611 1: 100% 16/16 [00:03<00:00, 4.26it/s] 53/128 0.508G 0.973 1.07 0.667 1: 100% 16/16 [00:02<00:00, 6.46it/s] 54/128 0.508G 0.925 1.34 0.556 1: 100% 16/16 [00:02<00:00, 6.46it/s] 55/128 0.508G 1.1 0.927 0.667 1: 100% 16/16 [00:03<00:00, 4.46it/s] 56/128 0.508G 1 1.97 0.583 1: 100% 16/16 [00:05<00:00, 3.06it/s] 57/128 0.508G 0.993 1.34 0.611 1: 100% 16/16 [00:02<00:00, 6.75it/s] 58/128 0.508G 0.954 1.17 0.639 1: 100% 16/16 [00:02<00:00, 6.50it/s] 59/128 0.508G 1.03 1.54 0.5 1: 100% 16/16 [00:02<00:00, 6.59it/s] 60/128 0.508G 1.01 1.12 0.611 1: 100% 16/16 [00:03<00:00, 5.32it/s] 61/128 0.508G 1 1.13 0.583 1: 100% 16/16 [00:03<00:00, 5.28it/s] 62/128 0.508G 0.943 0.986 0.639 1: 100% 16/16 [00:02<00:00, 6.75it/s] 63/128 0.508G 0.909 1.12 0.639 1: 100% 16/16 [00:02<00:00, 6.97it/s] 64/128 0.508G 0.888 0.867 0.75 1: 100% 16/16 [00:02<00:00, 6.32it/s] 65/128 0.508G 0.958 0.975 0.667 1: 100% 16/16 [00:03<00:00, 4.41it/s] 66/128 0.508G 0.939 0.947 0.639 1: 100% 16/16 [00:02<00:00, 6.54it/s] 67/128 0.508G 1.02 1.11 0.694 1: 100% 16/16 [00:03<00:00, 5.04it/s] 68/128 0.508G 0.998 0.971 0.667 1: 100% 16/16 [00:02<00:00, 5.55it/s] 69/128 0.508G 0.968 0.98 0.694 1: 100% 16/16 [00:03<00:00, 4.52it/s] 70/128 0.508G 0.965 1.11 0.722 1: 100% 16/16 [00:02<00:00, 6.55it/s] 71/128 0.508G 0.965 1.47 0.583 1: 100% 16/16 [00:02<00:00, 6.84it/s] 72/128 0.508G 0.953 1.2 0.611 1: 100% 16/16 [00:02<00:00, 6.54it/s] 73/128 0.508G 0.863 0.772 0.722 1: 100% 16/16 [00:02<00:00, 6.90it/s] 74/128 0.508G 0.946 0.884 0.667 1: 100% 16/16 [00:03<00:00, 4.25it/s] 75/128 0.508G 0.911 0.942 0.694 1: 100% 16/16 [00:02<00:00, 6.78it/s] 76/128 0.508G 0.964 1.16 0.694 1: 100% 16/16 [00:02<00:00, 6.80it/s] 77/128 0.508G 0.917 1.2 0.694 1: 100% 16/16 [00:02<00:00, 6.44it/s] 78/128 0.508G 0.941 0.955 0.639 1: 100% 16/16 [00:02<00:00, 6.22it/s] 79/128 0.508G 0.885 1.02 0.722 1: 100% 16/16 [00:03<00:00, 4.58it/s] 80/128 0.508G 0.864 0.802 0.694 1: 100% 16/16 [00:02<00:00, 6.33it/s] 81/128 0.508G 0.908 1.11 0.833 1: 100% 16/16 [00:02<00:00, 6.52it/s] 82/128 0.508G 0.915 0.843 0.778 1: 100% 16/16 [00:02<00:00, 6.82it/s] 83/128 0.508G 0.899 1.14 0.722 1: 100% 16/16 [00:03<00:00, 4.96it/s] 84/128 0.508G 0.826 0.81 0.75 1: 100% 16/16 [00:02<00:00, 5.77it/s] 85/128 0.508G 0.831 0.883 0.694 1: 100% 16/16 [00:02<00:00, 6.61it/s] 86/128 0.508G 0.804 0.95 0.694 1: 100% 16/16 [00:02<00:00, 6.42it/s] 87/128 0.508G 0.805 0.916 0.694 1: 100% 16/16 [00:02<00:00, 6.60it/s] 88/128 0.508G 0.824 0.936 0.667 1: 100% 16/16 [00:03<00:00, 4.40it/s] 89/128 0.508G 0.854 0.854 0.639 1: 100% 16/16 [00:02<00:00, 6.48it/s] 90/128 0.508G 0.79 1.14 0.694 1: 100% 16/16 [00:02<00:00, 6.72it/s] 91/128 0.508G 0.83 0.848 0.75 1: 100% 16/16 [00:02<00:00, 6.59it/s] 92/128 0.508G 0.805 1.32 0.639 1: 100% 16/16 [00:02<00:00, 6.47it/s] 93/128 0.508G 0.813 1.22 0.75 1: 100% 16/16 [00:03<00:00, 4.23it/s] 94/128 0.508G 0.796 0.91 0.722 1: 100% 16/16 [00:02<00:00, 6.68it/s] 95/128 0.508G 0.823 0.778 0.75 1: 100% 16/16 [00:02<00:00, 6.70it/s] 96/128 0.508G 0.827 0.898 0.806 1: 100% 16/16 [00:02<00:00, 6.50it/s] 97/128 0.508G 0.777 0.833 0.778 1: 100% 16/16 [00:02<00:00, 5.78it/s] 98/128 0.508G 0.79 0.735 0.806 1: 100% 16/16 [00:03<00:00, 4.78it/s] 99/128 0.508G 0.824 0.797 0.778 1: 100% 16/16 [00:02<00:00, 6.19it/s] 100/128 0.508G 0.802 0.893 0.806 1: 100% 16/16 [00:02<00:00, 5.94it/s] 101/128 0.508G 0.778 1.11 0.778 1: 100% 16/16 [00:02<00:00, 6.61it/s] 102/128 0.508G 0.795 1.15 0.722 1: 100% 16/16 [00:03<00:00, 4.30it/s] 103/128 0.508G 0.777 1.54 0.667 1: 100% 16/16 [00:02<00:00, 6.39it/s] 104/128 0.508G 0.764 0.916 0.722 1: 100% 16/16 [00:02<00:00, 6.66it/s] 105/128 0.508G 0.737 1.04 0.778 1: 100% 16/16 [00:02<00:00, 6.57it/s] 106/128 0.508G 0.689 0.792 0.75 1: 100% 16/16 [00:02<00:00, 6.55it/s] 107/128 0.508G 0.769 0.945 0.75 1: 100% 16/16 [00:03<00:00, 4.40it/s] 108/128 0.508G 0.78 1.21 0.75 1: 100% 16/16 [00:02<00:00, 6.61it/s] 109/128 0.508G 0.768 0.958 0.75 1: 100% 16/16 [00:02<00:00, 6.37it/s] 110/128 0.508G 0.802 0.953 0.75 1: 100% 16/16 [00:02<00:00, 6.41it/s] 111/128 0.508G 0.765 0.71 0.75 1: 100% 16/16 [00:02<00:00, 5.42it/s] 112/128 0.508G 0.709 1.07 0.722 1: 100% 16/16 [00:03<00:00, 5.15it/s] 113/128 0.508G 0.683 1.1 0.694 1: 100% 16/16 [00:02<00:00, 6.57it/s] 114/128 0.508G 0.685 0.892 0.778 1: 100% 16/16 [00:02<00:00, 6.41it/s] 115/128 0.508G 0.678 0.78 0.722 1: 100% 16/16 [00:02<00:00, 6.25it/s] 116/128 0.508G 0.714 1.19 0.722 1: 100% 16/16 [00:03<00:00, 4.29it/s] 117/128 0.508G 0.718 0.777 0.694 1: 100% 16/16 [00:02<00:00, 6.04it/s] 118/128 0.508G 0.744 0.855 0.778 1: 100% 16/16 [00:02<00:00, 6.72it/s] 119/128 0.508G 0.732 0.708 0.75 1: 100% 16/16 [00:02<00:00, 6.66it/s] 120/128 0.508G 0.7 0.88 0.778 1: 100% 16/16 [00:02<00:00, 5.85it/s] 121/128 0.508G 0.687 0.852 0.778 1: 100% 16/16 [00:03<00:00, 4.65it/s] 122/128 0.508G 0.671 1.01 0.778 1: 100% 16/16 [00:02<00:00, 6.46it/s] 123/128 0.508G 0.695 0.708 0.75 1: 100% 16/16 [00:02<00:00, 6.40it/s] 124/128 0.508G 0.685 0.725 0.778 1: 100% 16/16 [00:02<00:00, 6.69it/s] 125/128 0.508G 0.681 0.991 0.75 1: 100% 16/16 [00:03<00:00, 4.79it/s] 126/128 0.508G 0.674 0.72 0.75 1: 100% 16/16 [00:03<00:00, 4.96it/s] 127/128 0.508G 0.674 0.733 0.75 1: 100% 16/16 [00:02<00:00, 6.52it/s] 128/128 0.508G 0.687 0.682 0.75 1: 100% 16/16 [00:02<00:00, 6.48it/s] Training complete (0.105 hours) Results saved to runs/train-cls/exp Predict: python classify/predict.py --weights runs/train-cls/exp/weights/best.pt --source im.jpg Validate: python classify/val.py --weights runs/train-cls/exp/weights/best.pt --data Banco-Imagem-1 Export: python export.py --weights runs/train-cls/exp/weights/best.pt --include onnx PyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', 'runs/train-cls/exp/weights/best.pt') Visualize: https://netron.app CPU times: user 4.67 s, sys: 452 ms, total: 5.12 s Wall time: 6min 43s !python classify/val.py --weights runs/train-cls/exp/weights/best.pt --data $DATASET_NAME classify/val: data=Banco-Imagem-1, weights=['runs/train-cls/exp/weights/best.pt'], batch_size=128, imgsz=224, device=, workers=8, verbose=True, project=runs/val-cls, name=exp, exist_ok=False, half=False, dnn=False YOLOv5 🚀 v7.0-230-g53efd07 Python-3.10.12 torch-2.1.0+cu118 CUDA:0 (Tesla T4, 15102MiB) Fusing layers... Model summary: 117 layers, 1214869 parameters, 0 gradients, 2.9 GFLOPs testing: 100% 1/1 [00:00<00:00, 1.05it/s] Class Images top1_acc top5_acc all 36 0.639 1 avioes 7 0.571 1 barcos 6 0.667 1 carros 11 0.545 1 helicopteros 8 0.875 1 motos 4 0.5 1 Speed: 0.1ms pre-process, 14.8ms inference, 0.6ms post-process per image at shape (1, 3, 224, 224) Results saved to runs/val-cls/exp ``` </details> ### Evidências do treinamento #### Gráficos de precisão e perdas ![Descrição](https://i.imgur.com/wgvXUB6.jpg) #### Matriz de Confusão ![Descrição](https://i.imgur.com/3wAANRi.jpg) #### Inferindo com o modelo personalizado ``` #Pega a localização de uma imagem do conjunto de testes ou validações if os.path.exists(os.path.join(dataset.location, "test")): split_path = os.path.join(dataset.location, "test") else: os.path.join(dataset.location, "valid") example_class = os.listdir(split_path)[4] example_image_name = os.listdir(os.path.join(split_path, example_class))[4] example_image_path = os.path.join(split_path, example_class, example_image_name) os.environ["TEST_IMAGE_PATH"] = example_image_path print(f"Inferindo sobre um exemplo da classe '{example_class}'") #Infer !python classify/predict.py --weights runs/train-cls/exp/weights/best.pt --source $TEST_IMAGE_PATH Inferindo sobre um exemplo da classe 'carros' classify/predict: weights=['runs/train-cls/exp/weights/best.pt'], source=/content/yolov5/Banco-Imagem-1/test/carros/00012_jpg.rf.9f0d32646e83139878c5788b040038f7.jpg, data=data/coco128.yaml, imgsz=[224, 224], device=, view_img=False, save_txt=False, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1 YOLOv5 🚀 v7.0-230-g53efd07 Python-3.10.12 torch-2.1.0+cu118 CUDA:0 (Tesla T4, 15102MiB) Fusing layers... Model summary: 117 layers, 1214869 parameters, 0 gradients, 2.9 GFLOPs image 1/1 /content/yolov5/Banco-Imagem-1/test/carros/00012_jpg.rf.9f0d32646e83139878c5788b040038f7.jpg: 224x224 carros 0.91, avioes 0.08, motos 0.01, helicopteros 0.00, barcos 0.00, 2.7ms Speed: 0.3ms pre-process, 2.7ms inference, 5.1ms NMS per image at shape (1, 3, 224, 224) Results saved to runs/predict-cls/exp14 ``` ``` #### Modelo treinado com 80% ou mais de acurácia/precisão ========================================================= ``` ![Descrição](https://i.imgur.com/GB9Tihf.jpg) ``` #carro import requests image_url = "https://i.imgur.com/GB9Tihf.jpg" response = requests.get(image_url) response.raise_for_status() with open('carro.jpg', 'wb') as handler: handler.write(response.content) !python classify/predict.py --weights ./weights/yolov5x-cls.pt --source carro.jpg classify/predict: weights=['./weigths/yolov5x-cls.pt'], source=carro.jpg, data=data/coco128.yaml, imgsz=[224, 224], device=, view_img=False, save_txt=False, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1 YOLOv5 🚀 v7.0-230-g53efd07 Python-3.10.12 torch-2.1.0+cu118 CUDA:0 (Tesla T4, 15102MiB) Fusing layers... Model summary: 264 layers, 48072600 parameters, 0 gradients, 129.9 GFLOPs image 1/1 /content/yolov5/carro.jpg: 224x224 sports car 0.95, race car 0.02, convertible 0.01, car wheel 0.00, grille 0.00, 12.9ms Speed: 0.4ms pre-process, 12.9ms inference, 6.9ms NMS per image at shape (1, 3, 224, 224) Results saved to runs/predict-cls/exp13 ### Modelo treinado com ao menos 50% de acurácia/precisão ========================================================= ``` ![Descrição](https://i.imgur.com/ASwjAT5.jpg) ``` #Moto import requests image_url = "https://i.imgur.com/ASwjAT5.jpg" response = requests.get(image_url) response.raise_for_status() with open('moto.jpg', 'wb') as handler: handler.write(response.content) !python classify/predict.py --weights ./weights/yolov5m-cls.pt --source moto.jpg classify/predict: weights=['./weigths/yolov5m-cls.pt'], source=moto.jpg, data=data/coco128.yaml, imgsz=[224, 224], device=, view_img=False, save_txt=False, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1 YOLOv5 🚀 v7.0-230-g53efd07 Python-3.10.12 torch-2.1.0+cu118 CUDA:0 (Tesla T4, 15102MiB) Fusing layers... Model summary: 166 layers, 12947192 parameters, 0 gradients, 31.7 GFLOPs image 1/1 /content/yolov5/moto.jpg: 224x224 moped 0.64, scooter 0.17, disc brake 0.06, crash helmet 0.05, snowmobile 0.01, 5.4ms Speed: 0.4ms pre-process, 5.4ms inference, 6.9ms NMS per image at shape (1, 3, 224, 224) Results saved to runs/predict-cls/exp16 ``` ## Roboflow Banco-Imagem > 2023-10-24 9:29pm https://universe.roboflow.com/eniokilder/banco-imagem Provided by a Roboflow user License: CC BY 4.0 ## HuggingFace Link para o HuggingFace: https://huggingface.co/datasets/eniokilder/Banco-Imagem
eniokilder/Banco-Imagem
[ "region:us" ]
2023-10-28T11:31:58+00:00
{}
2023-10-28T11:48:31+00:00
[]
[]
TAGS #region-us
Projeto ======= Banco-Imagem ### Nome do aluno Enio Kilder Oliveira da Silva Tipo de Projeto: Classificação de Objetos, Modelo Selecionado: YOLOv5, Linguagem: PyTorch Performance ----------- O modelo treinado possui performance de 98.6%. ### Output do bloco de treinamento Expandir Conteúdo! ### Evidências do treinamento #### Gráficos de precisão e perdas !Descrição #### Matriz de Confusão !Descrição #### Inferindo com o modelo personalizado !Descrição !Descrição Roboflow -------- Banco-Imagem > 2023-10-24 9:29pm URL Provided by a Roboflow user License: CC BY 4.0 HuggingFace ----------- Link para o HuggingFace: URL
[ "### Nome do aluno\n\n\nEnio Kilder Oliveira da Silva\n\n\nTipo de Projeto: Classificação de Objetos, Modelo Selecionado: YOLOv5, Linguagem: PyTorch\n\n\nPerformance\n-----------\n\n\nO modelo treinado possui performance de 98.6%.", "### Output do bloco de treinamento\n\n\n\nExpandir Conteúdo!", "### Evidências do treinamento", "#### Gráficos de precisão e perdas\n\n\n!Descrição", "#### Matriz de Confusão\n\n\n!Descrição", "#### Inferindo com o modelo personalizado\n\n\n!Descrição\n\n\n!Descrição\n\n\nRoboflow\n--------\n\n\nBanco-Imagem > 2023-10-24 9:29pm\n\n\nURL\n\n\nProvided by a Roboflow user\nLicense: CC BY 4.0\n\n\nHuggingFace\n-----------\n\n\nLink para o HuggingFace:\n\n\nURL" ]
[ "TAGS\n#region-us \n", "### Nome do aluno\n\n\nEnio Kilder Oliveira da Silva\n\n\nTipo de Projeto: Classificação de Objetos, Modelo Selecionado: YOLOv5, Linguagem: PyTorch\n\n\nPerformance\n-----------\n\n\nO modelo treinado possui performance de 98.6%.", "### Output do bloco de treinamento\n\n\n\nExpandir Conteúdo!", "### Evidências do treinamento", "#### Gráficos de precisão e perdas\n\n\n!Descrição", "#### Matriz de Confusão\n\n\n!Descrição", "#### Inferindo com o modelo personalizado\n\n\n!Descrição\n\n\n!Descrição\n\n\nRoboflow\n--------\n\n\nBanco-Imagem > 2023-10-24 9:29pm\n\n\nURL\n\n\nProvided by a Roboflow user\nLicense: CC BY 4.0\n\n\nHuggingFace\n-----------\n\n\nLink para o HuggingFace:\n\n\nURL" ]
[ 6, 55, 16, 7, 14, 11, 58 ]
[ "passage: TAGS\n#region-us \n### Nome do aluno\n\n\nEnio Kilder Oliveira da Silva\n\n\nTipo de Projeto: Classificação de Objetos, Modelo Selecionado: YOLOv5, Linguagem: PyTorch\n\n\nPerformance\n-----------\n\n\nO modelo treinado possui performance de 98.6%.### Output do bloco de treinamento\n\n\n\nExpandir Conteúdo!### Evidências do treinamento#### Gráficos de precisão e perdas\n\n\n!Descrição#### Matriz de Confusão\n\n\n!Descrição#### Inferindo com o modelo personalizado\n\n\n!Descrição\n\n\n!Descrição\n\n\nRoboflow\n--------\n\n\nBanco-Imagem > 2023-10-24 9:29pm\n\n\nURL\n\n\nProvided by a Roboflow user\nLicense: CC BY 4.0\n\n\nHuggingFace\n-----------\n\n\nLink para o HuggingFace:\n\n\nURL" ]
eba4b3e186f9e1912ec655ed642651b97ec50880
Print ("This dataset includes 580 Q/A records, not separated, not cleaned yet.-I am working to clean it up-therefore I'm not sharing it publicly.")
ali-vakil/PMP_QA_dataset_not_clean
[ "task_categories:question-answering", "size_categories:n<1K", "language:en", "license:apache-2.0", "region:us" ]
2023-10-28T11:32:43+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["n<1K"], "task_categories": ["question-answering"], "pretty_name": "PMP test QA"}
2023-11-12T20:48:00+00:00
[]
[ "en" ]
TAGS #task_categories-question-answering #size_categories-n<1K #language-English #license-apache-2.0 #region-us
Print ("This dataset includes 580 Q/A records, not separated, not cleaned yet.-I am working to clean it up-therefore I'm not sharing it publicly.")
[]
[ "TAGS\n#task_categories-question-answering #size_categories-n<1K #language-English #license-apache-2.0 #region-us \n" ]
[ 40 ]
[ "passage: TAGS\n#task_categories-question-answering #size_categories-n<1K #language-English #license-apache-2.0 #region-us \n" ]
b83175cd7394e7a4027478a35b2f9d1dda3ac62f
# SAFE Sequential Attachment-based Fragment Embedding (SAFE) is a novel molecular line notation that represents molecules as an unordered sequence of fragment blocks to improve molecule design using generative models. Find the details and how to use at SAFE in the repo https://github.com/datamol-io/safe or the paper https://arxiv.org/pdf/2310.10773.pdf.
datamol-io/safe-gpt
[ "license:cc-by-4.0", "arxiv:2310.10773", "region:us" ]
2023-10-28T11:33:55+00:00
{"license": "cc-by-4.0", "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "input", "dtype": "string"}, {"name": "mc_labels", "sequence": "float64"}], "splits": [{"name": "train", "num_bytes": 203939038678, "num_examples": 945455307}, {"name": "test", "num_bytes": 25523244912, "num_examples": 118890444}, {"name": "validation", "num_bytes": 24920275439, "num_examples": 118451032}], "download_size": 270730145, "dataset_size": 254382559029}}
2023-10-28T12:48:45+00:00
[ "2310.10773" ]
[]
TAGS #license-cc-by-4.0 #arxiv-2310.10773 #region-us
# SAFE Sequential Attachment-based Fragment Embedding (SAFE) is a novel molecular line notation that represents molecules as an unordered sequence of fragment blocks to improve molecule design using generative models. Find the details and how to use at SAFE in the repo URL or the paper URL
[ "# SAFE\n\nSequential Attachment-based Fragment Embedding (SAFE) is a novel molecular line notation that represents molecules as an unordered sequence of fragment blocks to improve molecule design using generative models.\n\nFind the details and how to use at SAFE in the repo URL or the paper URL" ]
[ "TAGS\n#license-cc-by-4.0 #arxiv-2310.10773 #region-us \n", "# SAFE\n\nSequential Attachment-based Fragment Embedding (SAFE) is a novel molecular line notation that represents molecules as an unordered sequence of fragment blocks to improve molecule design using generative models.\n\nFind the details and how to use at SAFE in the repo URL or the paper URL" ]
[ 24, 71 ]
[ "passage: TAGS\n#license-cc-by-4.0 #arxiv-2310.10773 #region-us \n# SAFE\n\nSequential Attachment-based Fragment Embedding (SAFE) is a novel molecular line notation that represents molecules as an unordered sequence of fragment blocks to improve molecule design using generative models.\n\nFind the details and how to use at SAFE in the repo URL or the paper URL" ]
36757eee30ff1e6c15e2685069e477a15378ee95
# Dataset Card for "normed_tts_data" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
quocanh34/normed_tts_data
[ "region:us" ]
2023-10-28T11:49:28+00:00
{"dataset_info": {"features": [{"name": "sentence", "dtype": "string"}, {"name": "sentence_norm", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 12619, "num_examples": 131}], "download_size": 7015, "dataset_size": 12619}}
2023-10-28T11:49:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "normed_tts_data" More Information needed
[ "# Dataset Card for \"normed_tts_data\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"normed_tts_data\"\n\nMore Information needed" ]
[ 6, 17 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"normed_tts_data\"\n\nMore Information needed" ]
82629e2192366637e36c9a076264335a28c1acf6
# Dataset Card for Evaluation run of PulsarAI/2x-LoRA-Assemble-13B ## Dataset Description - **Homepage:** - **Repository:** https://huggingface.co/PulsarAI/2x-LoRA-Assemble-13B - **Paper:** - **Leaderboard:** https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - **Point of Contact:** [email protected] ### Dataset Summary Dataset automatically created during the evaluation run of model [PulsarAI/2x-LoRA-Assemble-13B](https://huggingface.co/PulsarAI/2x-LoRA-Assemble-13B) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 3 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_PulsarAI__2x-LoRA-Assemble-13B", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2023-10-28T12:51:33.520951](https://huggingface.co/datasets/open-llm-leaderboard/details_PulsarAI__2x-LoRA-Assemble-13B/blob/main/results_2023-10-28T12-51-33.520951.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "em": 0.01740771812080537, "em_stderr": 0.0013393597649753585, "f1": 0.12005662751677883, "f1_stderr": 0.002244767452564408, "acc": 0.42864652552428745, "acc_stderr": 0.009950202519105519 }, "harness|drop|3": { "em": 0.01740771812080537, "em_stderr": 0.0013393597649753585, "f1": 0.12005662751677883, "f1_stderr": 0.002244767452564408 }, "harness|gsm8k|5": { "acc": 0.09249431387414708, "acc_stderr": 0.007980396874560173 }, "harness|winogrande|5": { "acc": 0.7647987371744278, "acc_stderr": 0.011920008163650865 } } ``` ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
open-llm-leaderboard/details_PulsarAI__2x-LoRA-Assemble-13B
[ "region:us" ]
2023-10-28T11:51:37+00:00
{"pretty_name": "Evaluation run of PulsarAI/2x-LoRA-Assemble-13B", "dataset_summary": "Dataset automatically created during the evaluation run of model [PulsarAI/2x-LoRA-Assemble-13B](https://huggingface.co/PulsarAI/2x-LoRA-Assemble-13B) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_PulsarAI__2x-LoRA-Assemble-13B\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2023-10-28T12:51:33.520951](https://huggingface.co/datasets/open-llm-leaderboard/details_PulsarAI__2x-LoRA-Assemble-13B/blob/main/results_2023-10-28T12-51-33.520951.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"em\": 0.01740771812080537,\n \"em_stderr\": 0.0013393597649753585,\n \"f1\": 0.12005662751677883,\n \"f1_stderr\": 0.002244767452564408,\n \"acc\": 0.42864652552428745,\n \"acc_stderr\": 0.009950202519105519\n },\n \"harness|drop|3\": {\n \"em\": 0.01740771812080537,\n \"em_stderr\": 0.0013393597649753585,\n \"f1\": 0.12005662751677883,\n \"f1_stderr\": 0.002244767452564408\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.09249431387414708,\n \"acc_stderr\": 0.007980396874560173\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.7647987371744278,\n \"acc_stderr\": 0.011920008163650865\n }\n}\n```", "repo_url": "https://huggingface.co/PulsarAI/2x-LoRA-Assemble-13B", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_drop_3", "data_files": [{"split": "2023_10_28T12_51_33.520951", "path": ["**/details_harness|drop|3_2023-10-28T12-51-33.520951.parquet"]}, {"split": "latest", "path": ["**/details_harness|drop|3_2023-10-28T12-51-33.520951.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2023_10_28T12_51_33.520951", "path": ["**/details_harness|gsm8k|5_2023-10-28T12-51-33.520951.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2023-10-28T12-51-33.520951.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2023_10_28T12_51_33.520951", "path": ["**/details_harness|winogrande|5_2023-10-28T12-51-33.520951.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2023-10-28T12-51-33.520951.parquet"]}]}, {"config_name": "results", "data_files": [{"split": "2023_10_28T12_51_33.520951", "path": ["results_2023-10-28T12-51-33.520951.parquet"]}, {"split": "latest", "path": ["results_2023-10-28T12-51-33.520951.parquet"]}]}]}
2023-10-28T11:51:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of PulsarAI/2x-LoRA-Assemble-13B ## Dataset Description - Homepage: - Repository: URL - Paper: - Leaderboard: URL - Point of Contact: clementine@URL ### Dataset Summary Dataset automatically created during the evaluation run of model PulsarAI/2x-LoRA-Assemble-13B on the Open LLM Leaderboard. The dataset is composed of 3 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2023-10-28T12:51:33.520951(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Evaluation run of PulsarAI/2x-LoRA-Assemble-13B", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model PulsarAI/2x-LoRA-Assemble-13B on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-28T12:51:33.520951(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of PulsarAI/2x-LoRA-Assemble-13B", "## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL", "### Dataset Summary\n\nDataset automatically created during the evaluation run of model PulsarAI/2x-LoRA-Assemble-13B on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2023-10-28T12:51:33.520951(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ 6, 23, 31, 171, 67, 10, 4, 6, 6, 5, 5, 5, 7, 4, 10, 10, 5, 5, 9, 8, 8, 7, 8, 7, 5, 6, 6, 5 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for Evaluation run of PulsarAI/2x-LoRA-Assemble-13B## Dataset Description\n\n- Homepage: \n- Repository: URL\n- Paper: \n- Leaderboard: URL\n- Point of Contact: clementine@URL### Dataset Summary\n\nDataset automatically created during the evaluation run of model PulsarAI/2x-LoRA-Assemble-13B on the Open LLM Leaderboard.\n\nThe dataset is composed of 3 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the agregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:## Latest results\n\nThese are the latest results from run 2023-10-28T12:51:33.520951(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):### Supported Tasks and Leaderboards### Languages## Dataset Structure### Data Instances### Data Fields### Data Splits## Dataset Creation### Curation Rationale### Source Data#### Initial Data Collection and Normalization#### Who are the source language producers?### Annotations#### Annotation process#### Who are the annotators?### Personal and Sensitive Information## Considerations for Using the Data### Social Impact of Dataset### Discussion of Biases### Other Known Limitations## Additional Information### Dataset Curators### Licensing Information### Contributions" ]
5d1f0a1e0cda424ca7e4f2774057306e18617448
# Dataset Card for "guvercin" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
22Plaruno/guvercin
[ "region:us" ]
2023-10-28T12:00:55+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "CAT_00", "1": "CAT_01", "2": "CAT_02", "3": "CAT_03", "4": "CAT_04", "5": "CAT_05", "6": "CAT_06"}}}}], "splits": [{"name": "train", "num_bytes": 3961938154.75, "num_examples": 19994}], "download_size": 3960895184, "dataset_size": 3961938154.75}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-10-28T12:04:45+00:00
[]
[]
TAGS #region-us
# Dataset Card for "guvercin" More Information needed
[ "# Dataset Card for \"guvercin\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"guvercin\"\n\nMore Information needed" ]
[ 6, 13 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"guvercin\"\n\nMore Information needed" ]
466dd5f01a9e9cd54d2d79d6856f8de657d34862
# Dataset Card for "invoices-and-receipts_ocr_v1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dajor85570/invoices-and-receipts_ocr_v1
[ "region:us" ]
2023-10-28T12:26:27+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}, {"split": "valid", "path": "data/valid-*"}]}], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "id", "dtype": "string"}, {"name": "parsed_data", "dtype": "string"}, {"name": "raw_data", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 465061949.289, "num_examples": 2043}, {"name": "test", "num_bytes": 23808463.0, "num_examples": 125}, {"name": "valid", "num_bytes": 22325731.0, "num_examples": 70}], "download_size": 281665599, "dataset_size": 511196143.289}}
2023-10-28T12:33:00+00:00
[]
[]
TAGS #region-us
# Dataset Card for "invoices-and-receipts_ocr_v1" More Information needed
[ "# Dataset Card for \"invoices-and-receipts_ocr_v1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"invoices-and-receipts_ocr_v1\"\n\nMore Information needed" ]
[ 6, 25 ]
[ "passage: TAGS\n#region-us \n# Dataset Card for \"invoices-and-receipts_ocr_v1\"\n\nMore Information needed" ]
e2d5a13c949eb6916059a28f085325552423ec52
DOTA v2 Dataset with OBB, specifically the version from the [Ultralytics docs](https://docs.ultralytics.com/datasets/obb/dota-v2/) - [Website](https://captain-whu.github.io/DOTA/dataset.html) ![Image](https://user-images.githubusercontent.com/26833433/259461765-72fdd0d8-266b-44a9-8199-199329bf5ca9.jpg) ## Full License Here reproduced from the website webpage License for Academic Non-Commercial Use Only This DOTA dataset is made available under the following terms: 1. The Google Earth images in this dataset are subject to Google Earth's terms of use, which must be adhered to. 2. The GF-2 and JL-1 satellite images are provided by the China Centre for Resources Satellite Data and Application. The aerial images are provided by CycloMedia B.V. 3. Permission is hereby granted, free of charge, to any person obtaining a copy of this dataset to use it for academic, research, and other non-commercial uses only. 4. Redistribution, modification, or commercial use of this dataset or any portion of it is strictly prohibited without explicit permission from the copyright holder. 5. Any academic work that makes use of this dataset should include a citation to the dataset source. All rights not expressly granted are reserved.
satellite-image-deep-learning/DOTAv2
[ "license:cc-by-4.0", "remote-sensing", "oriented-bounding-boxes", "object-detection", "region:us" ]
2023-10-28T12:31:15+00:00
{"license": "cc-by-4.0", "tags": ["remote-sensing", "oriented-bounding-boxes", "object-detection"]}
2023-10-28T12:50:07+00:00
[]
[]
TAGS #license-cc-by-4.0 #remote-sensing #oriented-bounding-boxes #object-detection #region-us
DOTA v2 Dataset with OBB, specifically the version from the Ultralytics docs - Website !Image ## Full License Here reproduced from the website webpage License for Academic Non-Commercial Use Only This DOTA dataset is made available under the following terms: 1. The Google Earth images in this dataset are subject to Google Earth's terms of use, which must be adhered to. 2. The GF-2 and JL-1 satellite images are provided by the China Centre for Resources Satellite Data and Application. The aerial images are provided by CycloMedia B.V. 3. Permission is hereby granted, free of charge, to any person obtaining a copy of this dataset to use it for academic, research, and other non-commercial uses only. 4. Redistribution, modification, or commercial use of this dataset or any portion of it is strictly prohibited without explicit permission from the copyright holder. 5. Any academic work that makes use of this dataset should include a citation to the dataset source. All rights not expressly granted are reserved.
[ "## Full License\nHere reproduced from the website webpage\n\nLicense for Academic Non-Commercial Use Only\n\nThis DOTA dataset is made available under the following terms:\n\n1. The Google Earth images in this dataset are subject to Google Earth's terms of use, which must be adhered to.\n2. The GF-2 and JL-1 satellite images are provided by the China Centre for Resources Satellite Data and Application. The aerial images are provided by CycloMedia B.V.\n3. Permission is hereby granted, free of charge, to any person obtaining a copy of this dataset to use it for academic, research, and other non-commercial uses only.\n4. Redistribution, modification, or commercial use of this dataset or any portion of it is strictly prohibited without explicit permission from the copyright holder.\n5. Any academic work that makes use of this dataset should include a citation to the dataset source.\n\nAll rights not expressly granted are reserved." ]
[ "TAGS\n#license-cc-by-4.0 #remote-sensing #oriented-bounding-boxes #object-detection #region-us \n", "## Full License\nHere reproduced from the website webpage\n\nLicense for Academic Non-Commercial Use Only\n\nThis DOTA dataset is made available under the following terms:\n\n1. The Google Earth images in this dataset are subject to Google Earth's terms of use, which must be adhered to.\n2. The GF-2 and JL-1 satellite images are provided by the China Centre for Resources Satellite Data and Application. The aerial images are provided by CycloMedia B.V.\n3. Permission is hereby granted, free of charge, to any person obtaining a copy of this dataset to use it for academic, research, and other non-commercial uses only.\n4. Redistribution, modification, or commercial use of this dataset or any portion of it is strictly prohibited without explicit permission from the copyright holder.\n5. Any academic work that makes use of this dataset should include a citation to the dataset source.\n\nAll rights not expressly granted are reserved." ]
[ 35, 211 ]
[ "passage: TAGS\n#license-cc-by-4.0 #remote-sensing #oriented-bounding-boxes #object-detection #region-us \n## Full License\nHere reproduced from the website webpage\n\nLicense for Academic Non-Commercial Use Only\n\nThis DOTA dataset is made available under the following terms:\n\n1. The Google Earth images in this dataset are subject to Google Earth's terms of use, which must be adhered to.\n2. The GF-2 and JL-1 satellite images are provided by the China Centre for Resources Satellite Data and Application. The aerial images are provided by CycloMedia B.V.\n3. Permission is hereby granted, free of charge, to any person obtaining a copy of this dataset to use it for academic, research, and other non-commercial uses only.\n4. Redistribution, modification, or commercial use of this dataset or any portion of it is strictly prohibited without explicit permission from the copyright holder.\n5. Any academic work that makes use of this dataset should include a citation to the dataset source.\n\nAll rights not expressly granted are reserved." ]
85e8cbdb2b71ed2ff46d7d48c4766ffe8be043c1
# cloudsen12 ***``A dataset about clouds from Sentinel-2``*** CloudSEN12 is a LARGE dataset (~1 TB) for cloud semantic understanding that consists of 49,400 image patches (IP) that are evenly spread throughout all continents except Antarctica. Each IP covers 5090 x 5090 meters and contains data from Sentinel-2 levels 1C and 2A, hand-crafted annotations of thick and thin clouds and cloud shadows, Sentinel-1 Synthetic Aperture Radar (SAR), digital elevation model, surface water occurrence, land cover classes, and cloud mask results from six cutting-edge cloud detection algorithms. CloudSEN12 is designed to support both weakly and self-/semi-supervised learning strategies by including three distinct forms of hand-crafted labeling data: high-quality, scribble and no-annotation. For more details on how we created the dataset see our paper: CloudSEN12 - a global dataset for semantic understanding of cloud and cloud shadow in Sentinel-2. **ML-STAC Snippet** ```python import mlstac secret = 'https://huggingface.co/datasets/jfloresf/mlstac-demo/resolve/main/main.json' train_db = mlstac.load(secret, framework='torch', stream=True, device='cpu') ``` **Sensor: Sentinel 2 - MSI** **ML-STAC Task: TensorToTensor, TensorSegmentation** **Data raw repository: [http://www.example.com/](http://www.example.com/)** **Dataset discussion: [https://github.com/IPL-UV/ML-STAC/discussions/2](https://github.com/IPL-UV/ML-STAC/discussions/2)** **Review mean score: 5.0** **Split_strategy: random** **Paper: [https://www.nature.com/articles/s41597-022-01878-2](https://www.nature.com/articles/s41597-022-01878-2)** ## Data Providers |Name|Role|URL| | :---: | :---: | :---: | |Image & Signal Processing|['host']|https://isp.uv.es/| |ESA|['producer']|https://www.esa.int/| ## Curators |Name|Organization|URL| | :---: | :---: | :---: | |Cesar Aybar|Image & Signal Processing|http://csaybar.github.io/| ## Reviewers |Name|Organization|URL|Score| | :---: | :---: | :---: | :---: | |Cesar Aybar|Image & Signal Processing|http://csaybar.github.io/|5| ## Labels |Name|Value| | :---: | :---: | |clear|0| |thick-cloud|1| |thin-cloud|2| |cloud-shadow|3| ## Dimensions ### input |Axis|Name|Description| | :---: | :---: | :---: | |0|C|Channels - Spectral bands| |1|H|Height| |2|W|Width| ### target |Axis|Name|Description| | :---: | :---: | :---: | |0|C|Hand-crafted labels| |1|H|Height| |2|W|Width| ## Spectral Bands |Name|Common Name|Description|Center Wavelength|Full Width Half Max|Index| | :---: | :---: | :---: | :---: | :---: | :---: | |B01|coastal aerosol|Band 1 - Coastal aerosol - 60m|443.5|17.0|0| |B02|blue|Band 2 - Blue - 10m|496.5|53.0|1| |B03|green|Band 3 - Green - 10m|560.0|34.0|2| |B04|red|Band 4 - Red - 10m|664.5|29.0|3| |B05|red edge 1|Band 5 - Vegetation red edge 1 - 20m|704.5|13.0|4| |B06|red edge 2|Band 6 - Vegetation red edge 2 - 20m|740.5|13.0|5| |B07|red edge 3|Band 7 - Vegetation red edge 3 - 20m|783.0|18.0|6| |B08|NIR|Band 8 - Near infrared - 10m|840.0|114.0|7| |B8A|red edge 4|Band 8A - Vegetation red edge 4 - 20m|864.5|19.0|8| |B09|water vapor|Band 9 - Water vapor - 60m|945.0|18.0|9| |B10|cirrus|Band 10 - Cirrus - 60m|1375.5|31.0|10| |B11|SWIR 1|Band 11 - Shortwave infrared 1 - 20m|1613.5|89.0|11| |B12|SWIR 2|Band 12 - Shortwave infrared 2 - 20m|2199.5|173.0|12|
jfloresf/demo
[ "language:en", "clouds", "sentinel-2", "image-segmentation", "deep-learning", "remote-sensing", "region:us" ]
2023-10-28T12:35:52+00:00
{"language": ["en"], "pretty_name": "cloudsen12", "tags": ["clouds", "sentinel-2", "image-segmentation", "deep-learning", "remote-sensing"]}
2023-11-12T23:38:12+00:00
[]
[ "en" ]
TAGS #language-English #clouds #sentinel-2 #image-segmentation #deep-learning #remote-sensing #region-us
cloudsen12 ========== *''A dataset about clouds from Sentinel-2''* CloudSEN12 is a LARGE dataset (~1 TB) for cloud semantic understanding that consists of 49,400 image patches (IP) that are evenly spread throughout all continents except Antarctica. Each IP covers 5090 x 5090 meters and contains data from Sentinel-2 levels 1C and 2A, hand-crafted annotations of thick and thin clouds and cloud shadows, Sentinel-1 Synthetic Aperture Radar (SAR), digital elevation model, surface water occurrence, land cover classes, and cloud mask results from six cutting-edge cloud detection algorithms. CloudSEN12 is designed to support both weakly and self-/semi-supervised learning strategies by including three distinct forms of hand-crafted labeling data: high-quality, scribble and no-annotation. For more details on how we created the dataset see our paper: CloudSEN12 - a global dataset for semantic understanding of cloud and cloud shadow in Sentinel-2. ML-STAC Snippet Sensor: Sentinel 2 - MSI ML-STAC Task: TensorToTensor, TensorSegmentation Data raw repository: URL Dataset discussion: URL Review mean score: 5.0 Split\_strategy: random Paper: URL Data Providers -------------- Curators -------- Reviewers --------- Labels ------ Dimensions ---------- ### input ### target Spectral Bands --------------
[ "### input", "### target\n\n\n\nSpectral Bands\n--------------" ]
[ "TAGS\n#language-English #clouds #sentinel-2 #image-segmentation #deep-learning #remote-sensing #region-us \n", "### input", "### target\n\n\n\nSpectral Bands\n--------------" ]
[ 33, 3, 9 ]
[ "passage: TAGS\n#language-English #clouds #sentinel-2 #image-segmentation #deep-learning #remote-sensing #region-us \n### input### target\n\n\n\nSpectral Bands\n--------------" ]