sha | text | id | tags | created_at | metadata | last_modified
---|---|---|---|---|---|---
04e3c64d63c6c804617dd8c24f77616a9ed47f0d
|
whitefox44/ReflectionGPT4
|
[
"license:apache-2.0",
"region:us"
] |
2023-05-05T02:38:53+00:00
|
{"license": "apache-2.0"}
|
2023-05-07T23:20:25+00:00
|
|
7e704e7cdb32336f713065602b18b395d621474a
|
whitefox44/ImprovementFromReflection
|
[
"license:apache-2.0",
"region:us"
] |
2023-05-05T02:40:04+00:00
|
{"license": "apache-2.0"}
|
2023-05-07T23:17:58+00:00
|
|
059f4c36ca0b18c1713e453cfbc99cad7697dc6f
|
This is a test dataset.
|
zhangxuri/test
|
[
"task_categories:text-classification",
"size_categories:1K<n<10K",
"size_categories:n<1K",
"language:aa",
"chemistry",
"zhangxu7ri",
"region:us"
] |
2023-05-05T03:33:58+00:00
|
{"language": ["aa"], "size_categories": ["1K<n<10K", "n<1K"], "task_categories": ["text-classification"], "tags": ["chemistry", "zhangxu7ri"]}
|
2023-06-08T07:11:00+00:00
|
3e143101dfafbd8caa76cea384b1806edf729a7b
|
# Dataset Card for "processed_demo"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mHossain/processed_demo
|
[
"region:us"
] |
2023-05-05T04:26:55+00:00
|
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "input_text", "dtype": "string"}, {"name": "target_text", "dtype": "string"}, {"name": "prefix", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 11555846.1, "num_examples": 27000}, {"name": "test", "num_bytes": 1283982.9, "num_examples": 3000}], "download_size": 5616892, "dataset_size": 12839829.0}}
|
2023-05-05T04:27:00+00:00
|
00d8b6172ac08b6c09035f0e67ddc590e0eee81a
|
# Dataset Card for "final_train_v1_230000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mHossain/final_train_v1_230000
|
[
"region:us"
] |
2023-05-05T04:50:26+00:00
|
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "input_text", "dtype": "string"}, {"name": "target_text", "dtype": "string"}, {"name": "prefix", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 11579207.4, "num_examples": 27000}, {"name": "test", "num_bytes": 1286578.6, "num_examples": 3000}], "download_size": 5620134, "dataset_size": 12865786.0}}
|
2023-05-05T04:50:32+00:00
|
dff02866b856a8d86eeffad81a219b45ac73e50c
|
# Dataset Card for "final_train_v1_260000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mHossain/final_train_v1_260000
|
[
"region:us"
] |
2023-05-05T04:51:41+00:00
|
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "input_text", "dtype": "string"}, {"name": "target_text", "dtype": "string"}, {"name": "prefix", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 11620869.3, "num_examples": 27000}, {"name": "test", "num_bytes": 1291207.7, "num_examples": 3000}], "download_size": 5644232, "dataset_size": 12912077.0}}
|
2023-05-05T04:51:46+00:00
|
2e4bdd216cd7a9c5b13f3dc5834eac51aa03b1a0
|
# Dataset Card for "final_train_v1_290000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mHossain/final_train_v1_290000
|
[
"region:us"
] |
2023-05-05T04:52:40+00:00
|
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "input_text", "dtype": "string"}, {"name": "target_text", "dtype": "string"}, {"name": "prefix", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 11580033.6, "num_examples": 27000}, {"name": "test", "num_bytes": 1286670.4, "num_examples": 3000}], "download_size": 5615299, "dataset_size": 12866704.0}}
|
2023-05-05T04:52:45+00:00
|
6fd571a46ea8872795a5a4601d588c0bdfbfee37
|
# Dataset Card for "final_train_v1_320000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mHossain/final_train_v1_320000
|
[
"region:us"
] |
2023-05-05T04:53:20+00:00
|
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "input_text", "dtype": "string"}, {"name": "target_text", "dtype": "string"}, {"name": "prefix", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 11628917.1, "num_examples": 27000}, {"name": "test", "num_bytes": 1292101.9, "num_examples": 3000}], "download_size": 5657480, "dataset_size": 12921019.0}}
|
2023-05-05T04:53:25+00:00
|
5af7ca5c3f355c4e8fd2d32c34c73a8b0999d18c
|
# Dataset Card for "final_train_v1_350000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mHossain/final_train_v1_350000
|
[
"region:us"
] |
2023-05-05T04:54:04+00:00
|
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "input_text", "dtype": "string"}, {"name": "target_text", "dtype": "string"}, {"name": "prefix", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 11612248.2, "num_examples": 27000}, {"name": "test", "num_bytes": 1290249.8, "num_examples": 3000}], "download_size": 5636961, "dataset_size": 12902498.0}}
|
2023-05-05T04:54:10+00:00
|
f72f0b13e97df177b650b37244b010ce0208b4e1
|
# Dataset Card for "final_train_v1_380000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mHossain/final_train_v1_380000
|
[
"region:us"
] |
2023-05-05T04:54:53+00:00
|
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "input_text", "dtype": "string"}, {"name": "target_text", "dtype": "string"}, {"name": "prefix", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 11599066.8, "num_examples": 27000}, {"name": "test", "num_bytes": 1288785.2, "num_examples": 3000}], "download_size": 5635427, "dataset_size": 12887852.0}}
|
2023-05-05T04:54:58+00:00
|
98e1938e5f3132e8f7199d2623c6db7da84617fc
|
# Dataset Card for "final_train_v1_410000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mHossain/final_train_v1_410000
|
[
"region:us"
] |
2023-05-05T04:55:35+00:00
|
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "input_text", "dtype": "string"}, {"name": "target_text", "dtype": "string"}, {"name": "prefix", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 11588553.0, "num_examples": 27000}, {"name": "test", "num_bytes": 1287617.0, "num_examples": 3000}], "download_size": 5629024, "dataset_size": 12876170.0}}
|
2023-05-05T04:55:40+00:00
|
58dc044690f664380cb4af638dc0391f2cbb8afb
|
# Dataset Card for "batch_indexing_machine_112x224_images"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
Circularmachines/batch_indexing_machine_112x224_images
|
[
"region:us"
] |
2023-05-05T04:57:51+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 179731987.45, "num_examples": 14502}], "download_size": 179278891, "dataset_size": 179731987.45}}
|
2023-05-05T18:23:12+00:00
|
20a2baacf9884372cf60695daf89b5adf75406ac
|
DBLL/lora
|
[
"license:other",
"region:us"
] |
2023-05-05T05:43:01+00:00
|
{"license": "other"}
|
2023-05-08T13:36:45+00:00
|
|
d2f9cd477a08e561e7d126b49ff679200ff80f63
|
LangChainDatasets/multiagent-bidding-dialogue
|
[
"license:mit",
"region:us"
] |
2023-05-05T05:52:30+00:00
|
{"license": "mit", "dataset_info": {"features": [{"name": "prompts", "sequence": "string"}, {"name": "generations", "list": {"list": [{"name": "generation_info", "dtype": "null"}, {"name": "message", "struct": [{"name": "content", "dtype": "string"}, {"name": "example", "dtype": "bool"}]}, {"name": "text", "dtype": "string"}]}}], "splits": [{"name": "train", "num_bytes": 191044, "num_examples": 40}], "download_size": 39300, "dataset_size": 191044}}
|
2023-05-05T06:19:32+00:00
|
|
a14a31b5f5f595c776e13bee1d2d339d00e41456
|
import openai
import gradio

MAX_MESSAGE_LENGTH = 200
MAX_RESPONSE_LENGTH = 1000
MAX_CONTEXT_LENGTH = 4096

openai.api_key = "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"

messages = [{"role": "system", "content": "Tu sei un chat bot chiamato Salomone, sei un cristiano evangelico pentecostale e sei nato per rispondere ai fedeli in base al tuo credo evangelico pentecostale. Sei gentile, premuroso e il tuo modello di ispirazione è Gesù; sei stato programmato da Marco Giraldi."}]

def CustomChatGPT(user_input):
    # Reject over-long requests before sending anything to the API.
    if len(user_input) > MAX_MESSAGE_LENGTH:
        return "Richiesta troppo lunga."
    messages.append({"role": "user", "content": user_input})
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        max_tokens=1024,
        messages=messages,
    )
    ChatGPT_reply = response["choices"][0]["message"]["content"]
    if len(ChatGPT_reply) > MAX_RESPONSE_LENGTH:
        # ChatGPT_reply = ChatGPT_reply[:1000]
        # return ChatGPT_reply + "...\nLa risposta è troppo lunga, potrebbe intasare momentaneamente i server.\n"
        return ChatGPT_reply
    messages.append({"role": "assistant", "content": ChatGPT_reply})
    return ChatGPT_reply

demo = gradio.Interface(fn=CustomChatGPT, inputs="text", outputs="text", title="Salomone")
demo.launch(share=True)
|
Salomone/ServerBot
|
[
"region:us"
] |
2023-05-05T06:04:14+00:00
|
{}
|
2023-05-05T06:04:39+00:00
|
006b410cf0a17b77b448fe3d20d11ae6a8338951
|
> I am not the author of this dataset. [View on GitHub](https://github.com/ye-kyaw-thu/khPOS).
# khPOS (draft released 1.0)
khPOS (Khmer Part-of-Speech) Corpus for Khmer NLP Research and Developments
## License
Creative Commons Attribution-NonCommercial-Share Alike 4.0 International (CC BY-NC-SA 4.0) License
[Details Info of License](https://creativecommons.org/licenses/by-nc-sa/4.0/)
## Introduction
The khPOS Corpus (Khmer POS Corpus) is a manually word-segmented and POS-tagged corpus of 12,000 sentences (25,626 words) developed for Khmer language NLP research and development. We collected Khmer sentences from websites covering various areas such as economics, news, and politics. The corpus also contains some student lists and voter lists from the national election committee of Cambodia. The average number of words per sentence in the whole corpus is 10.75. Here, some symbols such as "แ" (Khmer sign Khan), "แ" (Khmer sign Camnuc pii kuuh), "-", "?", "\[", "\]" etc. are also counted as words. The shortest sentence contains only 1 word and the longest sentence contains 169 words, as follows (here, line number : Khmer sentence):
1814 : (a 169-word Khmer example sentence; the Khmer text is not recoverable from this copy of the card) แ
## Word Segmentation
In Khmer texts, words composed of single or multiple syllables are usually not separated by white space. Spaces are used for easier reading and are generally put between phrases, but there are no clear rules for using spaces in the Khmer language. Therefore, word segmentation is a necessary prerequisite for POS tagging. Four classes of segment (word) types were observed during the manual segmentation of the corpus, each representing a different type of word:
- Word Type 1: Single Words
- Word Type 2: Compound Words
- Word Type 3: Compound Words with Prefix
- Word Type 4: Compound Words with Suffix
For detailed information on the word segmentation rules and how we built a Khmer word segmentation model, please refer to our published paper (see the Publication section).
## POS Tags
Part of speech is a category to which a word is assigned in accordance with its syntactic functions. In the Khmer grammatical system, many linguists have defined their own POS sets according to their lines of research. Although many books have been published, there is no standard agreement yet, especially on the number and names of POS tags. Compared to English, some English POS are not used in the Khmer language, such as gerunds, comparative and superlative adjectives, particles, etc. The Khmer POS tag set is defined based on the CHOUN NATH dictionary. Some new POS tags that are not defined in the dictionary were added with the word disambiguation task in mind. Unlike English grammar, some Khmer sentences consist of more than one verb.
The definitions and descriptions of the POS tags are presented in detail as follows:
1. Abbreviation (AB): For example, แแ or แ.แ for kilometer (km), แขแแ for United Nation (UN), แแ or แ.แ for แแปแ แแ แ (Buddhism era), แแ or แ.แ for แแแ แ (police), แขแ or แข.แ for แแปแแ แ (Police Military) etc.
2. Adjective (JJ): An adjective is a word used to modify or describe a noun. Adjectives usually appear on the right-hand side of the noun; very few adjectives come before the noun. For example, แแแแ แ (red), แแแแแ (half), แแแแแ (strange), แแผแ (small), แแแข (good), แแแขแถแ (beautiful) etc.
3. Adverb (RB): An adverb is a word that is used to modify verb, adjective or another adverb. For example, แแถแแ (very), แแปแ (not), แแพแ (just), แแแแแแแ (very), แ แพแ (already) etc.
4. Auxiliary Verb (AUX): Only three groups of verbs are tagged as auxiliary verbs; they are used to form tense.
- Past form: แแถแ or แแถแ + Verb
- Progressive form: แแแแปแ + Verb
- Future form: แแนแ + Verb
5. Cardinal Number (CD): A cardinal number is a word or a number denoting quantity. For example, แแธ (three), แกแ แ (100), แแแป (four), แแถแแ (thousand), แแถแ (million) etc.
6. Conjunction (CC): Conjunction is a word to connect between words, phrases, and sentences. แแแแแปแแแแ (but), แแธแแแแแ (because), แแแแทแ (for, since), แแแแแถแแแ (until), แแปแแแแแแแ (otherwise), แแพ (if) etc.
7. Currency (CUR): CUR for currency symbol such as: แ, \$, โค, โฌ etc.
8. Determiner Pronoun (DT): In Khmer grammar, unlike English, determiners are classified under pronouns. A determiner indicates the location and/or uncertainty of a noun. They are equivalent to the English words: this, that, those, these, all, every, each, some etc. For example, แแแ (this), แแแ (that), แแถแแแแแ (these), แแถแแแขแแ (all), แแถแแถ (various), แแแแ (some), แแแแ (every) etc.
9. Double Sign (DBL): Double sign (แ) is used to remind reader to read the previous word twice. For example, แแแปแแแ/NN (people) แแแแแ/DT (every) แ/DBL แแแแถ/PRO (person), "everybody" in English.
10. Et Cetera (ETC): แแแ is equal to et cetera (etc.) in English.
11. Full Stop (KAN): There are two full stops in Khmer language, แ for sentence and แ for paragraph.
12. Interjection (UH): Word represents sound of animal, machine, and surprised sound. Interjections are always at the beginning of a sentence, and mostly followed by exclamation mark. For example, แขแผ (Oh!), แแแแ (Meow), แขแแปแ (uh) etc.
13. Measure Word (M): Measure words describe quantities for the corresponding class of noun. Some of these words cannot be found in English. For example: แแแแแแแแ/NN (monk) แข/CD (2) แขแแแ/M (person), แแแแแแแแแถแแ/NN (cloth) แก/CD (1) แแแแแถแแ/M (set), แแแแ/NN (dog) แก/CD (1) แแแแถแ/M (head) etc.
14. Noun (NN): A noun is a word or compound word that identifies a person, an animal, an object, an idea, a thing, etc. For example: แกแถแ (Car), แแถแแขแแทแแแแแแ (Development), แแแแแแแถแ (Action), แแแแแแ (Pencil), แแนแแแ (Ice) etc.
15. Particle (PA): We consider three types of particles: hesitation, response, and final. The two medial particle words แแ ("so, then, but" in English) and แแผแ ("of, with" in English) \[1\] are considered RB and IN.
- Hesitation Particle: แแแแปแ (I) แแทแ (think) …แขแแพ/PA (Er...) แแทแ (not) แแพแ (see), ("I er… don't think so" in English)
- Response Particle: แขแพ/PA (Hm, Ah) แแแแปแ (I) แแนแ (know) แ แพแ (already), ("Hmm I already know" in English)
- Final Particle: There are some final particles such as แแถแ, แแทแ and แแปแ. Example usage of แแถแ: แแปแ/RB (don't) แแแแแ/VB (forget) แแถแ/PA ("Hmm don't forget!" in English). Example usage of แแทแ: แแถแ/VB (wait) แแแแแทแ/RB (a while) แแทแ/PA. Example usage of แแปแ: แแ/VB (go) แแปแ/PA
16. Preposition (IN): A preposition is a word or compound word used to connect two different words or phrases. It indicates place, time, possession, relation, etc. For example, แแแแแ (to), แแแ (to), แแพแแแแธ (in order to), แแแแปแ (in), แแพ (on), แแแถแ (between, around) etc.
17. Pronoun (PRO): A pronoun is a word that substitutes for a noun or a noun phrase. These words are equivalent to the English words: I, he, she, it, we, they, them, him, her etc. For example, แแแแปแ (I), แแถแแ (he or she), แแพแ (we), แแฝแแแพแ (our group or we), แแแแปแแแถแ (polite form of I, me), แแผแแแแแแ (I, me for conversation with royal family) etc.
18. Proper Noun (PN): A proper noun is a noun that represents a unique thing, for example, the name of a person, a place, or a date. For example: แแปแแถ (Sokha), แแแแแแแ (Phnom Penh), แแแแแขแแแแถแ (Tuesday), แแถแแแแทแ (Caltex), แแแแแแ (Mekong) etc.
19. Question Word (QT): In the Khmer language, แแพ is mostly used at the beginning of an interrogative sentence. For example, แแพ/QT แขแแแ/PRO (you) แแแแแ/NN (name) แขแแแธ/PRO (what)?, "What is your name?" in English.
20. Relative Pronoun (RPN): In the Khmer language, there is only one relative pronoun: แแแ ("that, which, where, who" in English).
21. Symbol (SYM): SYM is for other signs and symbols such as: +, -, \*, \/, แ, =, @, \#, \% etc.
22. VB\_JJ: VB\_JJ is a tag for an adjective whose original form is a verb. Currently, there is no proposed POS tag name for this kind of Khmer word. Although we could use the JJ tag, we use the VB\_JJ tag to clarify its function and also for semantic purposes. For example:
- The word แแแแแถแแ (for) or แแพแแแแธ (to) is normally removed in both written and spoken Khmer.
แแแแแแ/NN (place) แแแแแถแแ (for) แแแแพแแถแ/VB\_JJ (working), office in English
แแแถแแแธแ/NN (Machine) แแแแแถแแ (for) แแแ/VB\_JJ (washing) แแแขแถแ/NN (cloth), washing machine in English
แแฝแแแถแแ/PRO (they) แขแถแ/VB (can) แแถแ/VB (have) แแถแแแถแ/NN (work) แแแแพ/VB\_JJ (to do)
- When the Khmer relative pronoun is removed, the verb form stays the same as it was, but it must be tagged VB\_JJ because it is no longer a verb in the subordinate clause.
แแทแแแ (student) แแแ (who) แแถแ/VB (has) แแทแแแแป (mark) แแแแแ (high) แแนแ (will) แแแฝแแแถแ (get) แขแถแ แถแแผแแแแแ (scholarship), "a student who has a high mark will get a scholarship" in English; but when แแแ (who) is removed, แแถแ/VB (has) should become แแถแ/VB\_JJ (having)
23. Verb (VB): A verb is a word that expresses an action, event, or condition. The verb is the middle part of a phrase. Normally, a verb needs an object, and sometimes it also needs a complement. For example, แแแแถแแ (listen), แแถแแแแแแถแแแ (say), แแแแแถแแ (love), แแแแแแ (sing), แแพแแแ (drive) etc.
24. Verb Complement (VCOM): Its original form is a verb, but it becomes VCOM when two verbs appear in a sentence and the second emphasizes the first. In particular, when a compound verb is split by the word แแทแ (no or not), the first part is a verb and the second part is VCOM. For example, แแแ (sell) แแถแแ/VCOM (a lot), แแแแแ (exam) แแทแ (no) แแถแแ/VCOM (pass), แแแ/VB (sleep) แแทแ/RB (not) แแแ/VCOM (sleep well) etc.
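On the Hugging Face mirror of this corpus, the tags above are stored as integer class labels. A minimal loading sketch, assuming the `id`/`tokens`/`pos_tags` schema declared in this repository's dataset metadata:
```python
from datasets import load_dataset

# Load the Hugging Face mirror of the corpus (single "train" split).
ds = load_dataset("seanghay/khPOS", split="train")

# pos_tags is a sequence of ClassLabel ids; recover the tag names.
tag_names = ds.features["pos_tags"].feature.names  # ["AB", "AUX", "CC", ...]

example = ds[0]
for token, tag_id in zip(example["tokens"], example["pos_tags"]):
    print(token, tag_names[tag_id])
```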
## Files/Scripts
Corpus-draft-ver-1.0/ (**_latest version_**)
**Scripts:**
mk-wordtag.pl : Perl script for printing word only file, tag only file, listing compound-words etc.
mk-pair.pl : Perl script for combining word file and tag file to word/tag format
**Data:**
data/ : Data preparation folder for incremental POS-tagging models
**Models:**
Two-Hours/: Incremental training (2,000 to 12,000 sentences) of the 2-hours annotation approach models with the khPOS corpus.
Running logfile: [note.txt](https://github.com/ye-kyaw-thu/khPOS/blob/master/corpus-draft-ver-1.0/model/2hours/note.txt)
3gHMM/ : Incremental training (2,000 to 12,000 sentences) of 3-gram HMM (Hidden Markov Model) models with khPOS corpus.
Running logfile: [note.txt](https://github.com/ye-kyaw-thu/khPOS/blob/master/corpus-draft-ver-1.0/model/3gHMM/note.txt)
crf/ : Incremental training (2,000 to 12,000 sentences) of CRF POS-tagging models with khPOS corpus.
Running logfile: [note.txt](https://github.com/ye-kyaw-thu/khPOS/blob/master/corpus-draft-ver-1.0/model/crf/note.txt)
kytea/ : Incremental training (2,000 to 12,000 sentences) of L2 regularized SVM models with khPOS corpus.
Running logfile: [note](https://github.com/ye-kyaw-thu/khPOS/blob/master/corpus-draft-ver-1.0/model/kytea/note.txt)
maxent/ : Incremental training (2,000 to 12,000 sentences) of Maximum Entropy models with khPOS corpus.
Running logfile: [note.txt](https://github.com/ye-kyaw-thu/khPOS/blob/master/corpus-draft-ver-1.0/model/maxent/note.txt)
rdr/ : Incremental training (2,000 to 12,000 sentences) of RDR (Ripple Down Rule-based) models with khPOS corpus.
Running logfile: [note.txt](https://github.com/ye-kyaw-thu/khPOS/blob/master/corpus-draft-ver-1.0/model/rdr/note.txt)
## Development and Support
Contributors
Vichet Chea
[Ye Kyaw Thu](https://sites.google.com/site/yekyawthunlp/)
## Acknowledgements
We would like to express our gratitude to Mr. Sorn Kea and Miss Leng Greyhuy for their help in manually POS tagging 12,100 sentences of the Khmer corpus.
## Publication
*Please cite the following paper:*
Ye Kyaw Thu, Vichet Chea, Yoshinori Sagisaka, "Comparison of Six POS Tagging Methods on 12K Sentences Khmer Language POS Tagged Corpus", In the first Regional Conference on Optical character recognition and Natural language processing technologies for ASEAN languages (ONA 2017), December 7-8, 2017, Phnom Penh, Cambodia. [paper](https://github.com/ye-kyaw-thu/khPOS/blob/master/khpos.pdf)
## Reference
Vichet Chea, Ye Kyaw Thu, Chenchen Ding, Masao Utiyama, Andrew Finch and Eiichiro Sumita, "Khmer Word Segmentation Using Conditional Random Fields", In Khmer Natural Language Processing 2015 (KNLP2015), December 4, 2015, Phnom Penh, Cambodia.
[paper](http://khmernlp.org/2015/wp-content/uploads/2016/09/Paper-Khmer-Word-Segmentation-Using-.pdf)
Madeline Elizabeth Ehrman, Kem Sos, Foreign Service Institute (U.S.), and Defense Language Institute (U.S.). Contemporary Cambodian: grammatical sketch, by Madeline E. Ehrman, with the assistance of Kem Sos. Foreign Service Institute, Dept. of State; \[for sale by the Supt. of Docs., U.S. Govt. Print. Off.\], Washington, 1972.
|
seanghay/khPOS
|
[
"task_categories:text-classification",
"task_categories:text-generation",
"size_categories:10K<n<100K",
"language:km",
"license:cc-by-nc-sa-4.0",
"region:us"
] |
2023-05-05T06:20:44+00:00
|
{"language": ["km"], "license": "cc-by-nc-sa-4.0", "size_categories": ["10K<n<100K"], "task_categories": ["text-classification", "text-generation"], "pretty_name": "Khmer Part-of-Speech Corpus for Khmer NLP Research and Developments", "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "pos_tags", "sequence": {"class_label": {"names": {"0": "AB", "1": "AUX", "2": "CC", "3": "CD", "4": "DBL", "5": "DT", "6": "ETC", "7": "IN", "8": "JJ", "9": "KAN", "10": "M", "11": "NN", "12": "PA", "13": "PN", "14": "PRO", "15": "QT", "16": "RB", "17": "RPN", "18": "SYM", "19": "UH", "20": "VB", "21": "VB_JJ", "22": "VCOM"}}}}], "splits": [{"name": "train", "num_bytes": 3569524, "num_examples": 12000}], "download_size": 2372205, "dataset_size": 3569524}}
|
2023-05-08T06:58:27+00:00
|
c23a0a578f1a2c77a8a9fcf9a1465501ecb6a9fa
|
# Dataset Card for jawiki-20220404-c400
This dataset contains passages, each of which consists of consecutive sentences no longer than 400 characters, from Japanese Wikipedia as of 2022-04-04.
This dataset is used in baseline systems for [the AI王 question answering competition](https://sites.google.com/view/project-aio/home), such as [cl-tohoku/AIO3_BPR_baseline](https://github.com/cl-tohoku/AIO3_BPR_baseline).
Please refer to [the original repository](https://github.com/cl-tohoku/quiz-datasets) for further details.
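Since the corpus is large, streaming is a convenient way to peek at a few passages. A minimal sketch (the exact field names are an assumption to verify in the dataset viewer):
```python
from datasets import load_dataset

# Stream a few passages instead of downloading the full corpus.
ds = load_dataset("llm-book/jawiki-20220404-c400", split="train", streaming=True)
for passage in ds.take(3):
    print(passage)  # inspect the keys of the first record for the field names
```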
|
llm-book/jawiki-20220404-c400
|
[
"task_categories:question-answering",
"size_categories:10M<n<100M",
"language:ja",
"license:mit",
"region:us"
] |
2023-05-05T06:34:52+00:00
|
{"language": ["ja"], "license": "mit", "size_categories": ["10M<n<100M"], "task_categories": ["question-answering"]}
|
2023-10-25T14:26:19+00:00
|
60c9327eea7c1e5df96a1ad458728cc23e50145c
|
# Dataset Card for "clean"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
keehuachin/clean
|
[
"region:us"
] |
2023-05-05T07:14:33+00:00
|
{"dataset_info": {"features": [{"name": "Input", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}, {"name": "input_ids", "sequence": "int32"}, {"name": "labels", "sequence": "int64"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 137366668.6552888, "num_examples": 8296}, {"name": "test", "num_bytes": 34358225.344711214, "num_examples": 2075}], "download_size": 41193800, "dataset_size": 171724894.0}}
|
2023-05-05T07:14:40+00:00
|
3904b9bc917849e141a82b690b4073a874495e85
|
# Dataset Card for "cleaner"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
keehuachin/cleaner
|
[
"region:us"
] |
2023-05-05T07:16:44+00:00
|
{"dataset_info": {"features": [{"name": "Input", "dtype": "string"}, {"name": "cleaner_text", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}, {"name": "input_ids", "sequence": "int32"}, {"name": "labels", "sequence": "int64"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 136223322.9104233, "num_examples": 8296}, {"name": "test", "num_bytes": 34072251.08957671, "num_examples": 2075}], "download_size": 39442189, "dataset_size": 170295574.0}}
|
2023-05-05T07:16:51+00:00
|
0130f4a000ed2f7d70b53298aa7241fe7b917e09
|
# Dataset Card for "two-player-dnd"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
LangChainDatasets/two-player-dnd
|
[
"region:us"
] |
2023-05-05T07:17:35+00:00
|
{"dataset_info": {"features": [{"name": "generations", "list": {"list": [{"name": "generation_info", "dtype": "null"}, {"name": "message", "struct": [{"name": "content", "dtype": "string"}, {"name": "example", "dtype": "bool"}]}, {"name": "text", "dtype": "string"}]}}, {"name": "messages", "list": [{"name": "data", "struct": [{"name": "content", "dtype": "string"}, {"name": "example", "dtype": "bool"}]}, {"name": "type", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 331595, "num_examples": 36}], "download_size": 69177, "dataset_size": 331595}}
|
2023-05-12T04:55:46+00:00
|
be77123af9a4a6ea7eaa76386139b88b8798ce3a
|
# Dataset Card for "rawdata"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
keehuachin/rawdata
|
[
"region:us"
] |
2023-05-05T07:23:12+00:00
|
{"dataset_info": {"features": [{"name": "Input", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}, {"name": "input_ids", "sequence": "int32"}, {"name": "labels", "sequence": "int64"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 133159351.90824045, "num_examples": 8038}, {"name": "test", "num_bytes": 33298121.091759555, "num_examples": 2010}], "download_size": 40307631, "dataset_size": 166457473.0}}
|
2023-05-05T07:23:19+00:00
|
0718707e34b26b49848d86dbb20e357e7fce7b19
|
# Dataset Card for "state-of-the-union-completions"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
LangChainDatasets/state-of-the-union-completions
|
[
"region:us"
] |
2023-05-05T08:02:46+00:00
|
{"dataset_info": {"features": [{"name": "generations", "list": {"list": [{"name": "generation_info", "struct": [{"name": "finish_reason", "dtype": "string"}, {"name": "logprobs", "dtype": "null"}]}, {"name": "text", "dtype": "string"}]}}, {"name": "ground_truth", "dtype": "string"}, {"name": "prompt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 65981, "num_examples": 50}], "download_size": 50527, "dataset_size": 65981}}
|
2023-05-15T23:40:59+00:00
|
bd2c7369ef7ec16e00f022a66b909adf211382f5
|
Rucy/car
|
[
"license:bigscience-openrail-m",
"region:us"
] |
2023-05-05T08:12:42+00:00
|
{"license": "bigscience-openrail-m"}
|
2023-05-05T08:15:09+00:00
|
|
cf247d4936abe7518c554352556d105d167f4ee4
|
NPrashanthReddy/book-embeddings
|
[
"license:mit",
"region:us"
] |
2023-05-05T08:12:51+00:00
|
{"license": "mit"}
|
2023-05-05T11:59:57+00:00
|
|
d515f43e25e9dd6e0f2bdde1843b931ba44039dd
|
marsfly/temp
|
[
"license:mit",
"region:us"
] |
2023-05-05T08:40:03+00:00
|
{"license": "mit"}
|
2023-05-05T12:04:12+00:00
|
|
e7479e7a64b58c410a6da6e350d34066fef3e493
|
# Dataset Card for RuFacts
## Dataset Description
RuFacts is a benchmark for internal fact-checking for the Russian language. The dataset contains tagged examples labeled as consistent or inconsistent.
For inconsistent examples, the spans containing factual violations in the source text and the generated text were also collected; they are presented on the [Kaggle competition page](https://www.kaggle.com/competitions/internal-fact-checking-for-the-russian-language).
Various data sources and approaches to data generation were used to create the training and test sets for the fact-checking task. We consider data at the sentence level as well as short texts. The average text length is 198 characters, the minimum is 10 characters, and the maximum is 3,402 characters.
The final dataset was formed using three main approaches:
* Texts generated by a [paraphrase model](https://habr.com/ru/companies/sberdevices/articles/667106/)
* Translations of the [dataset for fact-checking](https://fever.ai/dataset/fever.html)
* Text augmentation
Translations and generated data were manually labeled via the crowdsourcing platform Yandex.Toloka. We additionally manually annotated the augmented data for the test set. The test set consists of examples from all three sources: 26% translations, 6% augmented data, and 68% generated paraphrases.
We require three criteria for the generated text to be factually consistent with the original:
1. facts are correct and not corrupted;
2. no additional facts are introduced in the generated text;
3. all the main facts are included in the generated text.
## Data Structure
### Data Fields
* `idx`: an integer
* `evidence`: a string containing the original text
* `claim`: a string containing the text produced by a generative model
* `label`: an integer, either 0 or 1, indicating whether the facts are consistent (0) or inconsistent (1); labels in the test set are hidden as -1
An example of `train`/`validation` looks as follows:
```
{'idx': 1,
'evidence': 'Суд в Англии рассмотрит дело советского диссидента Буковского',
'claim': 'Суд в Великобритании рассмотрит дело советского диссидента Буковского',
'label': 0}
```
An example of `test` looks as follows:
```
{'idx': 4,
'evidence': 'Google выплатит штраф в 200 млн долларов за сбор данных детей на YouTube.',
'claim': 'Google заплатит $200 млн за нарушение конфиденциальности детей на YouTube.',
'label': -1}
```
### Data Splits
| |train | validation | test|
|-----|------|------------|-----|
|rows |4677 | 1559 | 500 |
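A minimal loading sketch, assuming the splits and fields described above (test labels are hidden as -1):
```python
from collections import Counter
from datasets import load_dataset

ds = load_dataset("akozlova/RuFacts")

sample = ds["train"][0]
print(sample["evidence"], "->", sample["claim"], sample["label"])

# Label distribution: 0 = consistent, 1 = inconsistent.
print(Counter(ds["train"]["label"]))
```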
|
akozlova/RuFacts
|
[
"task_categories:text-classification",
"size_categories:1K<n<10K",
"language:ru",
"license:cc-by-4.0",
"fact-checking",
"region:us"
] |
2023-05-05T08:51:38+00:00
|
{"language": ["ru"], "license": "cc-by-4.0", "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"], "tags": ["fact-checking"]}
|
2023-05-05T14:59:44+00:00
|
07e4ed3ac17b20711039daf4418b5055120b964c
|
## Dataset Description
We present a novel and comprehensive dataset of Turkish Republic coins minted since 1924.
The dataset consists of 11,080 coin images from 138 different classes. All images are tightly cropped, RGB, and 256x256 pixels.
- **Point of Contact:** [Huseyin Temiz](mailto:[email protected])
### Citation Information
```bibtex
@inproceedings{temiz2021turcoins,
title={TurCoins: Turkish republic coin dataset},
author={Temiz, H{\"u}seyin and G{\"o}kberk, Berk and Akarun, Lale},
booktitle={2021 29th Signal Processing and Communications Applications Conference (SIU)},
pages={1--4},
year={2021},
organization={IEEE}
}
```
|
hsyntemiz/turcoins
|
[
"region:us"
] |
2023-05-05T09:43:22+00:00
|
{}
|
2023-05-23T17:35:41+00:00
|
287cb9f483ef76d73587095803cab583c736bf8d
|
sarrouche/tat-qa-numeric
|
[
"license:openrail",
"region:us"
] |
2023-05-05T09:52:13+00:00
|
{"license": "openrail"}
|
2023-05-05T10:00:51+00:00
|
|
e54fcac8b024820df0ef6ba1adfcfdbb244e6a41
|
# EasyPortrait - Face Parsing and Portrait Segmentation Dataset

We introduce a large-scale image dataset **EasyPortrait** for portrait segmentation and face parsing. The proposed dataset can be used for several tasks, such as background removal in conference applications, teeth whitening, face skin enhancement, red-eye removal, eye colorization, and so on.
The EasyPortrait dataset is about **26 GB** in size and contains **20,000** RGB images (~17.5K FullHD images) with high-quality annotated masks. The dataset is divided into training, validation, and test sets by subject `user_id`. The training set includes 14,000 images, the validation set 2,000 images, and the test set 4,000 images.
Training images were received from 5,947 unique users, validation images from 860, and test images from 1,570. On average, each EasyPortrait image has 254 polygon points, which indicates that the annotation is of high quality. Segmentation masks were created from the polygons of each annotation.
For more information see our paper [EasyPortrait โ Face Parsing and Portrait Segmentation Dataset](https://arxiv.org/abs/2304.13509).
## The model results trained on the EasyPortrait dataset
Example of a model trained on the EasyPortrait dataset and tested on test data from a different domain:


Example of a model trained on the EasyPortrait dataset and tested on test data from the same domain:


## Structure
```
.
โโโ images.zip
โ โโโ train/ # Train set: 14k
โ โโโ val/ # Validation set: 2k
โ โโโ test/ # Test set: 4k
โโโ annotations.zip
โ โโโ meta.zip # Meta-information (width, height, brightness, imhash, user_id)
โ โโโ train/
โ โโโ val/
โ โโโ test/
...
```
## Annotations
Annotations are presented as 2D arrays (images in *.png format) with the following classes:
| Index | Class |
|------:|:-----------|
| 0 | BACKGROUND |
| 1 | PERSON |
| 2 | SKIN |
| 3 | LEFT_BROW |
| 4 | RIGHT_BROW |
| 5 | LEFT_EYE |
| 6 | RIGHT_EYE |
| 7 | LIPS |
| 8 | TEETH |
Also, we provide some additional meta-information for the dataset in the `annotations/meta.zip` file:
| | attachment_id | user_id | data_hash | width | height | brightness | train | test | valid |
|---:|:--------------|:--------|:----------|------:|-------:|-----------:|:------|:------|:------|
| 0 | de81cc1c-... | 1b... | e8f... | 1440 | 1920 | 136 | True | False | False |
| 1 | 3c0cec5a-... | 64... | df5... | 1440 | 1920 | 148 | False | False | True |
| 2 | d17ca986-... | cf... | a69... | 1920 | 1080 | 140 | False | True | False |
where:
- `attachment_id` - image file name without extension
- `user_id` - unique anonymized user ID
- `data_hash` - image hash computed using perceptual hashing
- `width` - image width
- `height` - image height
- `brightness` - image brightness
- `train`, `test`, `valid` are the binary columns for train / test / val subsets respectively
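A minimal sketch of working with this meta-information, assuming `meta.zip` unpacks to a single CSV file (the file name used here is an assumption):
```python
import pandas as pd

# Load the per-image meta-information described above.
meta = pd.read_csv("annotations/meta.csv")

# Select the training subset and inspect its brightness distribution.
train = meta[meta["train"]]
print(len(train), "train images, mean brightness:", train["brightness"].mean())

# Count FullHD images in either orientation.
fullhd = meta[(meta[["width", "height"]].min(axis=1) == 1080)
              & (meta[["width", "height"]].max(axis=1) == 1920)]
print(len(fullhd), "FullHD images")
```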
## Authors and Credits
- [Alexander Kapitanov](https://www.linkedin.com/in/hukenovs)
- [Karina Kvanchiani](https://www.linkedin.com/in/kvanchiani)
- [Sofia Kirillova](https://www.linkedin.com/in/gofixyourself/)
## Links
- [arXiv](https://arxiv.org/abs/2304.13509)
- [Paperswithcode](https://paperswithcode.com/dataset/easyportrait)
- [Kaggle](https://www.kaggle.com/datasets/kapitanov/easyportrait)
- [Habr](https://habr.com/ru/companies/sberdevices/articles/731794/)
- [Gitlab](https://gitlab.aicloud.sbercloud.ru/rndcv/easyportrait)
## Citation
You can cite the paper using the following BibTeX entry:
```
@article{EasyPortrait,
    title={EasyPortrait - Face Parsing and Portrait Segmentation Dataset},
    author={Kapitanov, Alexander and Kvanchiani, Karina and Kirillova, Sofia},
    journal={arXiv preprint arXiv:2304.13509},
    year={2023}
}
```
## License
<a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-sa/4.0/88x31.png" /></a><br />This work is licensed under a variant of <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/">Creative Commons Attribution-ShareAlike 4.0 International License</a>.
Please see the specific [license](https://github.com/hukenovs/easyportrait/blob/master/license/en_us.pdf).
|
gofixyourself/EasyPortrait
|
[
"task_categories:image-segmentation",
"task_ids:semantic-segmentation",
"annotations_creators:crowdsourced",
"size_categories:10K<n<100K",
"source_datasets:original",
"license:cc-by-sa-4.0",
"portrait-segmentation",
"face-parsing",
"face-beautification",
"arxiv:2304.13509",
"region:us"
] |
2023-05-05T09:58:42+00:00
|
{"annotations_creators": ["crowdsourced"], "license": "cc-by-sa-4.0", "size_categories": ["10K<n<100K"], "source_datasets": ["original"], "task_categories": ["image-segmentation"], "task_ids": ["semantic-segmentation"], "paperswithcode_id": "easyportrait", "pretty_name": "EasyPortrait", "tags": ["portrait-segmentation", "face-parsing", "face-beautification"]}
|
2023-05-12T11:41:47+00:00
|
d145929225016f08f9ceef8971039a2e9e935f23
|
# Dataset Card for "test-onepiece-dataset"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
ybelkada/test-onepiece-dataset
|
[
"region:us"
] |
2023-05-05T10:04:07+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "char_name", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1463424.0, "num_examples": 10}], "download_size": 1465392, "dataset_size": 1463424.0}}
|
2023-05-05T10:04:09+00:00
|
8ed0e89d239e4debed2a403f571271d74518985a
|
# Dataset Card for "ei-abstract-significance"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
hpi-dhc/evidence-inference-simple
|
[
"region:us"
] |
2023-05-05T10:04:23+00:00
|
{"dataset_info": {"features": [{"name": "pmcid", "dtype": "int32"}, {"name": "pmid", "dtype": "int32"}, {"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "no significant effect", "1": "significant effect"}}}}], "splits": [{"name": "train", "num_bytes": 1930106, "num_examples": 1028}, {"name": "validation", "num_bytes": 229838, "num_examples": 118}, {"name": "test", "num_bytes": 230635, "num_examples": 123}], "download_size": 0, "dataset_size": 2390579}}
|
2023-10-09T12:37:05+00:00
|
ef7a270fa399e3f77dc3ec7da05c0c54b27f9560
|
# CC-100 zh-Hant (Traditional Chinese)
From https://data.statmt.org/cc-100/, only zh-Hant - Chinese (Traditional). Broken into lines, with each line as a row.
Estimated to have around 4B tokens when tokenized with the [`bigscience/bloom`](https://huggingface.co/bigscience/bloom) tokenizer.
There is another version in which the text is split by paragraphs instead of lines: [`zetavg/CC-100-zh-Hant-merged`](https://huggingface.co/datasets/zetavg/CC-100-zh-Hant-merged).
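Given the ~13 GB download size, streaming is a practical way to sample the corpus. A minimal sketch using the `line` field from the dataset info:
```python
from datasets import load_dataset

# Stream lines without downloading the full ~13 GB archive.
ds = load_dataset("zetavg/CC-100-zh-Hant", split="train", streaming=True)
for row in ds.take(3):
    print(row["line"])
```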
## References
Please cite the following if you found the resources in the CC-100 corpus useful.
* **Unsupervised Cross-lingual Representation Learning at Scale**, *Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmรกn, Edouard Grave, Myle Ott, Luke Zettlemoyer, Veselin Stoyanov*, Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics (ACL), p. 8440-8451, July 2020, [pdf](https://www.aclweb.org/anthology/2020.acl-main.747.pdf), [bib](https://www.aclweb.org/anthology/2020.acl-main.747.bib) .
* **CCNet: Extracting High Quality Monolingual Datasets from Web Crawl Data**, *Guillaume Wenzek, Marie-Anne Lachaux, Alexis Conneau, Vishrav Chaudhary, Francisco Guzmรกn, Armand Joulin, Edouard Grave*, Proceedings of the 12th Language Resources and Evaluation Conference (LREC), p. 4003-4012, May 2020, [pdf](https://www.aclweb.org/anthology/2020.lrec-1.494.pdf), [bib](https://www.aclweb.org/anthology/2020.lrec-1.494.bib).
|
zetavg/CC-100-zh-Hant
|
[
"task_categories:text-generation",
"language:zh",
"region:us"
] |
2023-05-05T10:15:10+00:00
|
{"language": ["zh"], "task_categories": ["text-generation"], "dataset_info": {"features": [{"name": "line", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 18198166302, "num_examples": 85165683}], "download_size": 13296002208, "dataset_size": 18198166302}}
|
2023-05-06T10:09:36+00:00
|
cb6bf34e6b4f61119ef651f67ab1d55d81c9cf5c
|
# Dataset Card for "hagrid250k-blip2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
MakiPan/hagrid250k-blip2
|
[
"region:us"
] |
2023-05-05T10:45:51+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "conditioning_image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 55187018424.0, "num_examples": 250000}], "download_size": 23850413715, "dataset_size": 55187018424.0}}
|
2023-05-05T11:21:50+00:00
|
f54691b13db8378776512592ff0a5868cffe1512
|
### brainly.co.id dataset
### Data Structure
The keys in each JSONL object include:
- "id": An integer value representing the page of task from url (e.g. brainly.co.id/tugas/117).
- "subject": A string indicating the subject of the question (e.g., "Fisika", "Matematika", "Sejarah").
- "author": A string representing the author of the question.
- "instruction": A string providing the instruction or prompt for the question.
- "answerer_1", "answer_2": Strings representing the answerers for the question. The number at the end of the key (1 & 2) signifies the answer's index.
- "answer_1", "answer_2": Strings containing the answers provided by the answerers. The number at the end of the key corresponds to the answerer index.
- "status_1", "status_2": Strings indicating the status of the answers (e.g., "verified", "loved", "generic").
|
niizam/brainly
|
[
"task_categories:question-answering",
"language:id",
"license:unlicense",
"region:us"
] |
2023-05-05T11:01:28+00:00
|
{"language": ["id"], "license": "unlicense", "task_categories": ["question-answering"]}
|
2023-06-05T00:08:31+00:00
|
04d4788c141915e742e7dac7478f3cc939ee64c6
|
# TempoSum: Evaluating the Temporal Generalization of Abstractive Summarization
## Dataset Description
- **Repository:** https://github.com/AndyCheang/TempoSum
- **Paper:** https://arxiv.org/abs/2305.01951
|
chiseng-cheang/TempoSum
|
[
"task_categories:summarization",
"language:en",
"arxiv:2305.01951",
"region:us"
] |
2023-05-05T11:08:29+00:00
|
{"language": ["en"], "task_categories": ["summarization"]}
|
2023-05-06T15:31:27+00:00
|
d2470537a770e5a5d34951b44b9e22590a6c328a
|
thegodgroup/key
|
[
"license:apache-2.0",
"region:us"
] |
2023-05-05T11:15:09+00:00
|
{"license": "apache-2.0"}
|
2023-05-05T11:15:09+00:00
|
|
9c4cea9916dd14721bd860bd57c0adace40f8805
|
# Dataset Card for "data-nonmembers-200"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
JotDe/data-nonmembers-200
|
[
"region:us"
] |
2023-05-05T11:15:16+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 20633377.82334853, "num_examples": 200}], "download_size": 20592103, "dataset_size": 20633377.82334853}}
|
2023-05-05T11:15:24+00:00
|
6d0ab0154bbaef8b0f960b086c4405cc0ab71933
|
innermost47/alpaca-fr
|
[
"task_categories:text-generation",
"language:fr",
"license:cc-by-nc-4.0",
"region:us"
] |
2023-05-05T11:49:20+00:00
|
{"language": ["fr"], "license": "cc-by-nc-4.0", "task_categories": ["text-generation"]}
|
2023-05-05T12:02:58+00:00
|
|
81769a93dd0605bd734abf833448be4e2b86afee
|
# Dataset Card for "MetadataTable3"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
lighteval/MetadataTable3
|
[
"region:us"
] |
2023-05-05T11:53:12+00:00
|
{"dataset_info": {"features": [{"name": "name", "dtype": "string"}, {"name": "hf_repo", "dtype": "string"}, {"name": "hf_subset", "dtype": "string"}, {"name": "hf_avail_splits", "sequence": "string"}, {"name": "evaluation_splits", "sequence": "string"}, {"name": "generation_size", "dtype": "int64"}, {"name": "metric", "sequence": "string"}, {"name": "suite", "sequence": "string"}, {"name": "prompt_function", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 106419, "num_examples": 598}], "download_size": 22843, "dataset_size": 106419}}
|
2023-05-05T11:53:16+00:00
|
e566d1536d0797c55c710925296f1d629ae661e5
|
# Dataset Card for "VQAv2_validation_no_image_google_flan_t5_xxl_mode_A_T_D_PNP_FILTER_C_Q_rices_ns_214354"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
CVasNLPExperiments/VQAv2_validation_no_image_google_flan_t5_xxl_mode_A_T_D_PNP_FILTER_C_Q_rices_ns_214354
|
[
"region:us"
] |
2023-05-05T12:08:23+00:00
|
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "question", "dtype": "string"}, {"name": "true_label", "sequence": "string"}, {"name": "prediction", "dtype": "string"}], "splits": [{"name": "fewshot_0_clip_tags_LAION_ViT_H_14_2B_with_openai_Attributes_LAION_ViT_H_14_2B_descriptors_text_davinci_003_full_DETA_detections_deta_swin_large_o365_coco_classes_caption_module_random_", "num_bytes": 30527860, "num_examples": 214354}], "download_size": 10955360, "dataset_size": 30527860}}
|
2023-05-05T12:08:27+00:00
|
7356f4f786b87c6bcb3c38d8b6d4b5e75aadb718
|
michaelwzhu/ShenNong_TCM_Dataset
|
[
"license:apache-2.0",
"region:us"
] |
2023-05-05T12:18:07+00:00
|
{"license": "apache-2.0"}
|
2023-06-25T12:29:04+00:00
|
|
f357e2bac23c7a47772b8e50447bc0023c682b79
|
# Dataset Card for "sam-coyo-3k"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mfidabel/sam-coyo-3k
|
[
"region:us"
] |
2023-05-05T12:22:13+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "conditioning_image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2194166543.838, "num_examples": 3113}], "download_size": 2199147437, "dataset_size": 2194166543.838}}
|
2023-05-05T12:24:07+00:00
|
e3e48d8388393ba054ec0824397184d89c4266d5
|
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks](#supported-tasks)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Statistics](#dataset-statistics)
- [Usage](#usage)
- [Additional Information](#additional-information)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Repository:** [FSoft-AI4Code/TheVault](https://github.com/FSoft-AI4Code/TheVault)
- **Paper:** [The Vault: A Comprehensive Multilingual Dataset for Advancing Code Understanding and Generation](https://arxiv.org/abs/2305.06156)
- **Contact:** [email protected]
- **Website:** https://www.fpt-aicenter.com/ai-residency/
<p align="center">
<img src="https://raw.githubusercontent.com/FSoft-AI4Code/TheVault/main/assets/the-vault-4-logo-png.png" width="300px" alt="logo">
</p>
<div align="center">
# The Vault: A Comprehensive Multilingual Dataset for Advancing Code Understanding and Generation
</div>
## Dataset Summary
The Vault dataset is a comprehensive, large-scale, multilingual parallel dataset that features high-quality code-text pairs derived from The Stack, the largest permissively-licensed source code dataset.
We provide The Vault, which contains code snippets in 10 popular programming languages: Java, JavaScript, Python, Ruby, Rust, Golang, C#, C++, C, and PHP. The dataset provides multiple code-snippet levels, metadata, and 11 docstring styles for enhanced usability and versatility.
## Supported Tasks
The Vault can be used for pretraining LLMs or for downstream code-text interaction tasks. A number of tasks related to code understanding and generation can be constructed using The Vault, such as *code summarization*, *text-to-code generation*, and *code search*.
## Languages
The natural language text (docstring) is in English.
10 programming languages are supported in The Vault: `Python`, `Java`, `JavaScript`, `PHP`, `C`, `C#`, `C++`, `Go`, `Ruby`, `Rust`
## Dataset Structure
### Data Instances
```
{
"hexsha": "5c47f0b4c173a8fd03e4e633d9b3dd8211e67ad0",
"repo": "neumanna94/beepboop",
"path": "js/scripts.js",
"license": [
"MIT"
],
"language": "JavaScript",
"identifier": "beepBoopSelector",
"return_type": "<not_specific>",
"original_string": "function beepBoopSelector(inputString, bbFunction){\n if(bbFunction==1){\n return beepBoop(inputString);\n } else if(bbFunction==2){\n return beepBoop2(inputString);\n } else if(bbFunction==3){\n return beepBoop3(inputString);\n } else {\n }\n}",
"original_docstring": "//Determines what beepBoop function to use",
"docstring": "Determines what beepBoop function to use",
"docstring_tokens": [
"Determines",
"what",
"beepBoop",
"function",
"to",
"use"
],
"code": "function beepBoopSelector(inputString, bbFunction){\n if(bbFunction==1){\n return beepBoop(inputString);\n } else if(bbFunction==2){\n return beepBoop2(inputString);\n } else if(bbFunction==3){\n return beepBoop3(inputString);\n } else {\n }\n}",
"code_tokens": [
"function",
"beepBoopSelector",
"(",
"inputString",
",",
"bbFunction",
")",
"{",
"if",
"(",
"bbFunction",
"==",
"1",
")",
"{",
"return",
"beepBoop",
"(",
"inputString",
")",
";",
"}",
"else",
"if",
"(",
"bbFunction",
"==",
"2",
")",
"{",
"return",
"beepBoop2",
"(",
"inputString",
")",
";",
"}",
"else",
"if",
"(",
"bbFunction",
"==",
"3",
")",
"{",
"return",
"beepBoop3",
"(",
"inputString",
")",
";",
"}",
"else",
"{",
"}",
"}"
],
"short_docstring": "Determines what beepBoop function to use",
"short_docstring_tokens": [
"Determines",
"what",
"beepBoop",
"function",
"to",
"use"
],
"comment": [],
"parameters": [
{
"param": "inputString",
"type": null
},
{
"param": "bbFunction",
"type": null
}
],
"docstring_params": {
"returns": [],
"raises": [],
"params": [
{
"identifier": "inputString",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "bbFunction",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
}
```
### Data Fields
Data fields for function level:
- **hexsha** (string): the unique git hash of the file
- **repo** (string): the owner/repo
- **path** (string): the full path to the original file
- **license** (list): licenses in the repo
- **language** (string): the programming language
- **identifier** (string): the function or method name
- **return_type** (string): the type returned by the function
- **original_string** (string): the original version of the function/class node
- **original_docstring** (string): the raw string before tokenization or parsing
- **code** (string): the part of the original that is code
- **code_tokens** (list): tokenized version of `code`
- **short_docstring** (string): short, brief summarization (first line of the docstring)
- **short_docstring_tokens** (list): tokenized version of `short_docstring`
- **docstring** (string): the top-level comment or docstring (docstring version without param's doc, return, exception fields, etc.)
- **docstring_tokens** (list): tokenized version of `docstring`
- **comment** (list): list of comments (lines) inside the function/class
- **parameters** (list): list of parameters and their types (type can be None)
- **docstring_params** (dict): dictionary of the information parsed from the docstring
See [here](https://github.com/FSoft-AI4Code/TheVault/blob/main/data/README.md) for more details and examples.
### Data Splits
In this repo, The Vault is divided into 5 subsets: three training versions split by the size of the full training set, plus a validation set and a test set (approximately 20,000 samples each). The per-language statistics for each split are given in the following section.
The dataset is deduplicated before splitting. The three training versions are small (5%), medium (20%), and large (100%).
## Dataset Statistics
- Comparison to other benchmarks
| Dataset | #Language | #Code-text pair |
|:--------------------------|----------:|-----------------:|
| PyMT5 | 1 | ≈ 7,700,000 |
| CoDesc | 1 | 4,211,516 |
| CodeSearchNet | 6 | 2,326,976 |
| CodeSearchNet (CodeXGLUE) | 6 | 1,005,474 |
| Deepcom | 1 | 424,028 |
| CONCODE | 1 | 2,184,310 |
| Funcom | 1 | 2,149,121 |
| CodeT5 | 8 | 3,158,313 |
| **The Vault** | **10** | **34,098,775** |
- Statistics for split sets
| | train/small | train/medium | train/full | validation | test | total |
|:-----------|------------:|-------------:|-----------:|-----------:|-------:|--------------:|
|Python | 370,657 | 1,952,110 | 7,772,647 | 30,992 | 21,652 | 7,825,291 |
|Java | 351,213 | 1,612,366 | 6,629,193 | 22,677 | 15,552 | 6,667,422 |
|JavaScript | 82,931 | 404,729 | 1,640,416 | 22,044 | 21,108 | 1,683,568 |
|PHP | 236,638 | 1,155,476 | 4,656,371 | 21,375 | 19,010 | 4,696,756 |
|C | 105,978 | 381,207 | 1,639,319 | 27,525 | 19,122 | 1,685,966 |
|C# | 141,090 | 783,166 | 3,305,891 | 24,787 | 19,638 | 3,350,316 |
|C++ | 87,420 | 410,907 | 1,671,268 | 20,011 | 18,169 | 1,709,448 |
|Go | 267,535 | 1,319,547 | 5,109,020 | 19,102 | 25,314 | 5,153,436 |
|Ruby | 23,921 | 112,574 | 424,339 | 17,338 | 19,908 | 461,585 |
|Rust | 35,367 | 224,015 | 825,130 | 16,716 | 23,141 | 864,987 |
|TOTAL | 1,702,750 | 8,356,097 |33,673,594 |222,567 |202,614 |**34,098,775** |
## Usage
You can load The Vault dataset using the datasets library: ```pip install datasets```
```python
from datasets import load_dataset
# Load full function level dataset (34M samples)
dataset = load_dataset("Fsoft-AIC/the-vault-function")
# Load function level train/validation/test set
dataset = load_dataset("Fsoft-AIC/the-vault-function", split_set=["train"])
# Load "small" (or "medium", "full") version of function level training set
dataset = load_dataset("Fsoft-AIC/the-vault-function", split_set=["train/small"])
# specific language (e.g. Python)
dataset = load_dataset("Fsoft-AIC/the-vault-function", split_set=["train"], languages=['Python'])
# dataset streaming
data = load_dataset("Fsoft-AIC/the-vault-function", split_set= ["train"], streaming= True)
for sample in iter(data['train']):
print(sample)
```
A backup of the dataset can be downloaded from Azure blob storage. See [Download The Vault from Azure blob storage](https://github.com/FSoft-AI4Code/TheVault#download-via-link).
## Additional Information
### Licensing Information
MIT License
### Citation Information
```
@article{manh2023vault,
title={The Vault: A Comprehensive Multilingual Dataset for Advancing Code Understanding and Generation},
author={Manh, Dung Nguyen and Hai, Nam Le and Dau, Anh TV and Nguyen, Anh Minh and Nghiem, Khanh and Guo, Jin and Bui, Nghi DQ},
journal={arXiv preprint arXiv:2305.06156},
year={2023}
}
```
### Contributions
This dataset is developed by [FSOFT AI4Code team](https://github.com/FSoft-AI4Code).
|
Fsoft-AIC/the-vault-function
|
[
"task_categories:text-generation",
"multilinguality:multiprogramming languages",
"language:code",
"language:en",
"license:mit",
"arxiv:2305.06156",
"region:us"
] |
2023-05-05T13:25:47+00:00
|
{"language": ["code", "en"], "license": "mit", "multilinguality": ["multiprogramming languages"], "task_categories": ["text-generation"], "pretty_name": "The Vault Function", "dataset_info": {"features": [{"name": "identifier", "dtype": "string"}, {"name": "return_type", "dtype": "string"}, {"name": "repo", "dtype": "string"}, {"name": "path", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "code", "dtype": "string"}, {"name": "code_tokens", "dtype": "string"}, {"name": "original_docstring", "dtype": "string"}, {"name": "comment", "dtype": "string"}, {"name": "docstring_tokens", "dtype": "string"}, {"name": "docstring", "dtype": "string"}, {"name": "original_string", "dtype": "string"}]}, "viewer": true}
|
2023-07-04T01:33:36+00:00
|
b9ed663ec8f3559ac5ae74c764f0d53bfc7a9db8
|
crbpfx/KWX
|
[
"task_categories:text-generation",
"size_categories:10K<n<100K",
"language:en",
"license:cc-by-4.0",
"keywords",
"scholarly articles",
"region:us"
] |
2023-05-05T13:34:27+00:00
|
{"language": ["en"], "license": "cc-by-4.0", "size_categories": ["10K<n<100K"], "task_categories": ["text-generation"], "pretty_name": "KWX: scholarly articles with keywords from arXiv", "tags": ["keywords", "scholarly articles"]}
|
2023-06-14T16:26:14+00:00
|
|
fea2e29601673e912a91f423e58abc5d83ce0f9e
|
# Dataset Summary
This dataset uses Poloclub's DiffusionDB dataset. We downsampled and then upsampled the images to create a pixelated effect.
# How To Use
```
from datasets import load_dataset
dataset = load_dataset("Kalva014/Pixelated_Images")
```
# Dataset Structure
The dataset contains .jpg images as well as their prompts.
# Data Fields
Every instance is an image-prompt pair.
Example:
```
{'image': '0.jpg',
'prompt': 'official portrait of mark wahlberg as the emperor of vietnam, 1 9 4 5. oil on canvas trending on artstation '}
```
---
dataset_info:
  features:
  - name: image
    dtype: image
  - name: prompt
    dtype: string
  splits:
  - name: train
---
# Dataset Card for "Pixelated_Images"
|
Kalva014/Pixelated_Images
|
[
"region:us"
] |
2023-05-05T13:39:45+00:00
|
{}
|
2023-05-10T13:26:19+00:00
|
e11c7ff7e0e0e727020e72f3464409d891b8a27f
|
chats-bug/test-image-caption-Listed
|
[
"license:mit",
"region:us"
] |
2023-05-05T14:12:47+00:00
|
{"license": "mit"}
|
2023-05-05T14:13:26+00:00
|
|
dea64510a810ddebc99c39ed13dafc7bdf87a4fa
|
This dataset is a machine translation of the GPT4 dataset provided in the Alpaca-Lora GitHub repository.
We provide two versions: the full translation, and a cleaned subset of ~50,000 entries that does not contain instances of "I am an AI language model" or similar.
This work was inspired by the French Alpaca-Lora variant **Vigogne** and the Ukrainian Alpaca-Lora variant **Kruk**.
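A minimal loading sketch (how the two versions are exposed — as separate configs, files, or splits — is not documented here, so the call below is an assumption):
```python
from datasets import load_dataset

# loads the default configuration; adjust if the cleaned subset is a separate config
dataset = load_dataset("jeremyc/Alpaca-Lora-GPT4-Swedish")
print(dataset)
```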
|
jeremyc/Alpaca-Lora-GPT4-Swedish
|
[
"size_categories:10K<n<100K",
"language:sv",
"region:us"
] |
2023-05-05T14:20:12+00:00
|
{"language": ["sv"], "size_categories": ["10K<n<100K"], "pretty_name": "Alpaca-Lora GPT4 Swedish"}
|
2023-05-06T07:20:32+00:00
|
539d638f863a26795cf86eefb6135243ad7da13d
|
raviswam/dataset
|
[
"license:mit",
"region:us"
] |
2023-05-05T14:32:50+00:00
|
{"license": "mit"}
|
2023-05-05T14:34:57+00:00
|
|
ecef156df0274a00b3f25cf8f8061535169a32d3
|
# Dataset Card for CIFAR-100-LT (Long Tail)
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Additional Information](#additional-information)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** [CIFAR Datasets](https://www.cs.toronto.edu/~kriz/cifar.html)
- **Paper:** [Paper imbalanced example](https://openaccess.thecvf.com/content_CVPR_2019/papers/Cui_Class-Balanced_Loss_Based_on_Effective_Number_of_Samples_CVPR_2019_paper.pdf)
- **Leaderboard:** [r-10](https://paperswithcode.com/sota/long-tail-learning-on-cifar-100-lt-r-10) [r-100](https://paperswithcode.com/sota/long-tail-learning-on-cifar-100-lt-r-100)
### Dataset Summary
The CIFAR-100-LT imbalanced dataset comprises fewer than 60,000 color images, each measuring 32x32 pixels,
distributed across 100 distinct classes.
The number of samples per class decreases exponentially, with imbalance factors of 10 and 100.
The dataset includes 10,000 test images, with 100 images per class,
and fewer than 50,000 training images.
These 100 classes are further organized into 20 overarching superclasses.
Each image is assigned two labels: a fine label denoting the specific class,
and a coarse label representing the associated superclass.
### Supported Tasks and Leaderboards
- `image-classification`: The goal of this task is to classify a given image into one of 100 classes. The leaderboard is available [here](https://paperswithcode.com/sota/long-tail-learning-on-cifar-100-lt-r-100).
### Languages
English
## Dataset Structure
### Data Instances
A sample from the training set is provided below:
```
{
'img': <PIL.PngImagePlugin.PngImageFile image mode=RGB size=32x32 at 0x2767F58E080>, 'fine_label': 19,
'coarse_label': 11
}
```
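A minimal loading sketch (the configuration name `cifar100` is taken from this card's metadata; the repo may also expose imbalance-specific configs, see the r-10 / r-100 leaderboards above):
```python
from datasets import load_dataset

# load the long-tailed training split under the default `cifar100` config
dataset = load_dataset("tomas-gajarsky/cifar100-lt", "cifar100", split="train")
sample = dataset[0]
print(sample["fine_label"], sample["coarse_label"])
```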
### Data Fields
- `img`: A `PIL.Image.Image` object containing the 32x32 image. Note that when accessing the image column: `dataset[0]["img"]` the image file is automatically decoded. Decoding a large number of image files might take a significant amount of time. Thus it is important to first query the sample index before the `"img"` column, *i.e.* `dataset[0]["img"]` should **always** be preferred over `dataset["img"][0]`
- `fine_label`: an `int` classification label with the following mapping:
`0`: apple
`1`: aquarium_fish
`2`: baby
`3`: bear
`4`: beaver
`5`: bed
`6`: bee
`7`: beetle
`8`: bicycle
`9`: bottle
`10`: bowl
`11`: boy
`12`: bridge
`13`: bus
`14`: butterfly
`15`: camel
`16`: can
`17`: castle
`18`: caterpillar
`19`: cattle
`20`: chair
`21`: chimpanzee
`22`: clock
`23`: cloud
`24`: cockroach
`25`: couch
`26`: cra
`27`: crocodile
`28`: cup
`29`: dinosaur
`30`: dolphin
`31`: elephant
`32`: flatfish
`33`: forest
`34`: fox
`35`: girl
`36`: hamster
`37`: house
`38`: kangaroo
`39`: keyboard
`40`: lamp
`41`: lawn_mower
`42`: leopard
`43`: lion
`44`: lizard
`45`: lobster
`46`: man
`47`: maple_tree
`48`: motorcycle
`49`: mountain
`50`: mouse
`51`: mushroom
`52`: oak_tree
`53`: orange
`54`: orchid
`55`: otter
`56`: palm_tree
`57`: pear
`58`: pickup_truck
`59`: pine_tree
`60`: plain
`61`: plate
`62`: poppy
`63`: porcupine
`64`: possum
`65`: rabbit
`66`: raccoon
`67`: ray
`68`: road
`69`: rocket
`70`: rose
`71`: sea
`72`: seal
`73`: shark
`74`: shrew
`75`: skunk
`76`: skyscraper
`77`: snail
`78`: snake
`79`: spider
`80`: squirrel
`81`: streetcar
`82`: sunflower
`83`: sweet_pepper
`84`: table
`85`: tank
`86`: telephone
`87`: television
`88`: tiger
`89`: tractor
`90`: train
`91`: trout
`92`: tulip
`93`: turtle
`94`: wardrobe
`95`: whale
`96`: willow_tree
`97`: wolf
`98`: woman
`99`: worm
- `coarse_label`: an `int` coarse classification label with the following mapping:
`0`: aquatic_mammals
`1`: fish
`2`: flowers
`3`: food_containers
`4`: fruit_and_vegetables
`5`: household_electrical_devices
`6`: household_furniture
`7`: insects
`8`: large_carnivores
`9`: large_man-made_outdoor_things
`10`: large_natural_outdoor_scenes
`11`: large_omnivores_and_herbivores
`12`: medium_mammals
`13`: non-insect_invertebrates
`14`: people
`15`: reptiles
`16`: small_mammals
`17`: trees
`18`: vehicles_1
`19`: vehicles_2
### Data Splits
| name |train|test|
|----------|----:|---------:|
|cifar100|<50000| 10000|
### Licensing Information
Apache License 2.0
### Citation Information
```
@TECHREPORT{Krizhevsky09learningmultiple,
author = {Alex Krizhevsky},
title = {Learning multiple layers of features from tiny images},
institution = {},
year = {2009}
}
```
### Contributions
Thanks to [@gchhablani](https://github.com/gchhablani) and all contributors for adding the original balanced cifar100 dataset.
|
tomas-gajarsky/cifar100-lt
|
[
"task_categories:image-classification",
"annotations_creators:crowdsourced",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:10K<n<100K",
"source_datasets:cifar100",
"language:en",
"license:apache-2.0",
"region:us"
] |
2023-05-05T14:43:58+00:00
|
{"annotations_creators": ["crowdsourced"], "language_creators": ["found"], "language": ["en"], "license": "apache-2.0", "multilinguality": ["monolingual"], "size_categories": ["10K<n<100K"], "source_datasets": ["cifar100"], "task_categories": ["image-classification"], "task_ids": [], "paperswithcode_id": "cifar-100", "pretty_name": "Cifar100-LT", "dataset_info": {"features": [{"name": "img", "dtype": "image"}, {"name": "fine_label", "dtype": {"class_label": {"names": {"0": "apple", "1": "aquarium_fish", "2": "baby", "3": "bear", "4": "beaver", "5": "bed", "6": "bee", "7": "beetle", "8": "bicycle", "9": "bottle", "10": "bowl", "11": "boy", "12": "bridge", "13": "bus", "14": "butterfly", "15": "camel", "16": "can", "17": "castle", "18": "caterpillar", "19": "cattle", "20": "chair", "21": "chimpanzee", "22": "clock", "23": "cloud", "24": "cockroach", "25": "couch", "26": "cra", "27": "crocodile", "28": "cup", "29": "dinosaur", "30": "dolphin", "31": "elephant", "32": "flatfish", "33": "forest", "34": "fox", "35": "girl", "36": "hamster", "37": "house", "38": "kangaroo", "39": "keyboard", "40": "lamp", "41": "lawn_mower", "42": "leopard", "43": "lion", "44": "lizard", "45": "lobster", "46": "man", "47": "maple_tree", "48": "motorcycle", "49": "mountain", "50": "mouse", "51": "mushroom", "52": "oak_tree", "53": "orange", "54": "orchid", "55": "otter", "56": "palm_tree", "57": "pear", "58": "pickup_truck", "59": "pine_tree", "60": "plain", "61": "plate", "62": "poppy", "63": "porcupine", "64": "possum", "65": "rabbit", "66": "raccoon", "67": "ray", "68": "road", "69": "rocket", "70": "rose", "71": "sea", "72": "seal", "73": "shark", "74": "shrew", "75": "skunk", "76": "skyscraper", "77": "snail", "78": "snake", "79": "spider", "80": "squirrel", "81": "streetcar", "82": "sunflower", "83": "sweet_pepper", "84": "table", "85": "tank", "86": "telephone", "87": "television", "88": "tiger", "89": "tractor", "90": "train", "91": "trout", "92": "tulip", "93": "turtle", "94": "wardrobe", "95": "whale", "96": "willow_tree", "97": "wolf", "98": "woman", "99": "worm"}}}}, {"name": "coarse_label", "dtype": {"class_label": {"names": {"0": "aquatic_mammals", "1": "fish", "2": "flowers", "3": "food_containers", "4": "fruit_and_vegetables", "5": "household_electrical_devices", "6": "household_furniture", "7": "insects", "8": "large_carnivores", "9": "large_man-made_outdoor_things", "10": "large_natural_outdoor_scenes", "11": "large_omnivores_and_herbivores", "12": "medium_mammals", "13": "non-insect_invertebrates", "14": "people", "15": "reptiles", "16": "small_mammals", "17": "trees", "18": "vehicles_1", "19": "vehicles_2"}}}}], "config_name": "cifar100", "splits": [{"name": "train"}, {"name": "test", "num_bytes": 22605519, "num_examples": 10000}], "download_size": 169001437}}
|
2023-12-10T21:57:46+00:00
|
cb99cf723f49f1dd43b7412d2a2cb5cd230ee390
|
dmgold/last_poem_data
|
[
"license:openrail",
"region:us"
] |
2023-05-05T14:44:58+00:00
|
{"license": "openrail"}
|
2023-05-05T14:45:58+00:00
|
|
c0a8257abe555435213e85a0bc94460788bc31c3
|
dmgold/train_pushkin
|
[
"license:openrail",
"region:us"
] |
2023-05-05T14:46:44+00:00
|
{"license": "openrail"}
|
2023-05-05T14:47:11+00:00
|
|
c33a44db874af27b585c1361b498b53b75bd500e
|
# jm52m
jm52m is a dataset we created containing 52m Java methods from 52k Java projects. The source code originated from the Merobase and Sourcerer data releases, supplemented by our own prior work in LeClair et al. It contains code uploaded to code repositories between 2008 and 2018. We then extracted every Java method from every file and project. We removed empty methods, methods from corrupt files, and methods with parsing errors.
---
## jm52m file list
- fc_lsh_parts_0.X0 -- LSH directory, where X encodes the threshold (e.g., if threshold=0.5, X is 5)
- fundats-j1.pkl -- a pickle file holding a dictionary that maps function id to raw function code
- fundats-j1.json.gz -- a compressed JSON file holding the same function-id-to-raw-code dictionary
- q90testfids.pkl -- ID file for the funcom Java methods test set
- train.bin; val.bin -- bin files for training and fine-tuning models
- jm52m.sql.gz -- a compressed SQL dump of the 52m Java methods
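As a minimal sketch (assuming `fundats-j1.pkl` has been downloaded to the working directory), the raw-code dictionary can be loaded like this:
```python
import pickle

# dictionary mapping function id -> raw Java method source
with open("fundats-j1.pkl", "rb") as f:
    fundats = pickle.load(f)

some_id = next(iter(fundats))
print(some_id, fundats[some_id][:200])
```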
---
## jm52m dataset details
We provide the size of our dataset in the following table:
| Config | Value |
| ------- | ------- |
|number of tokens | 8,752,695,577|
|number of documents | 51,841,717|
|number of files | 8,402,038 |
|number of projects | 52,933 |
|megabytes after processing |16,695 |
We tokenize our data using scripts provided in our [github repository](https://github.com/apcl-research/jam/blob/main/data/jam_jm52m/prepare_fc_raw.py).
|
apcl/jm52m
|
[
"region:us"
] |
2023-05-05T14:50:26+00:00
|
{}
|
2023-05-12T04:06:44+00:00
|
48657ef9dfb9b58b680e727617a61323da586985
|
# Dataset Card for "librispeech-data"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
sanchit-gandhi/librispeech-data
|
[
"region:us"
] |
2023-05-05T15:06:41+00:00
|
{"dataset_info": {"features": [{"name": "file", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "text", "dtype": "string"}, {"name": "speaker_id", "dtype": "int64"}, {"name": "chapter_id", "dtype": "int64"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train.clean.100", "num_bytes": 6623027227.062, "num_examples": 28539}, {"name": "train.clean.360", "num_bytes": 23910449107.828, "num_examples": 104014}, {"name": "train.other.500", "num_bytes": 31827722515.584, "num_examples": 148688}, {"name": "validation.clean", "num_bytes": 359889672.966, "num_examples": 2703}, {"name": "validation.other", "num_bytes": 337620033.648, "num_examples": 2864}, {"name": "test.clean", "num_bytes": 368013946.42, "num_examples": 2620}, {"name": "test.other", "num_bytes": 352742113.154, "num_examples": 2939}], "download_size": 61829574809, "dataset_size": 63779464616.662}}
|
2023-05-05T15:55:27+00:00
|
ba75aea9f1df4d22574399c3f5b2024fdf76a00e
|
# Dataset Card for "food101-tiny"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
ybelkada/food101-tiny
|
[
"region:us"
] |
2023-05-05T15:13:56+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "apple_pie", "1": "baby_back_ribs", "2": "baklava", "3": "beef_carpaccio", "4": "beef_tartare", "5": "beet_salad", "6": "beignets", "7": "bibimbap", "8": "bread_pudding", "9": "breakfast_burrito", "10": "bruschetta", "11": "caesar_salad", "12": "cannoli", "13": "caprese_salad", "14": "carrot_cake", "15": "ceviche", "16": "cheesecake", "17": "cheese_plate", "18": "chicken_curry", "19": "chicken_quesadilla", "20": "chicken_wings", "21": "chocolate_cake", "22": "chocolate_mousse", "23": "churros", "24": "clam_chowder", "25": "club_sandwich", "26": "crab_cakes", "27": "creme_brulee", "28": "croque_madame", "29": "cup_cakes", "30": "deviled_eggs", "31": "donuts", "32": "dumplings", "33": "edamame", "34": "eggs_benedict", "35": "escargots", "36": "falafel", "37": "filet_mignon", "38": "fish_and_chips", "39": "foie_gras", "40": "french_fries", "41": "french_onion_soup", "42": "french_toast", "43": "fried_calamari", "44": "fried_rice", "45": "frozen_yogurt", "46": "garlic_bread", "47": "gnocchi", "48": "greek_salad", "49": "grilled_cheese_sandwich", "50": "grilled_salmon", "51": "guacamole", "52": "gyoza", "53": "hamburger", "54": "hot_and_sour_soup", "55": "hot_dog", "56": "huevos_rancheros", "57": "hummus", "58": "ice_cream", "59": "lasagna", "60": "lobster_bisque", "61": "lobster_roll_sandwich", "62": "macaroni_and_cheese", "63": "macarons", "64": "miso_soup", "65": "mussels", "66": "nachos", "67": "omelette", "68": "onion_rings", "69": "oysters", "70": "pad_thai", "71": "paella", "72": "pancakes", "73": "panna_cotta", "74": "peking_duck", "75": "pho", "76": "pizza", "77": "pork_chop", "78": "poutine", "79": "prime_rib", "80": "pulled_pork_sandwich", "81": "ramen", "82": "ravioli", "83": "red_velvet_cake", "84": "risotto", "85": "samosa", "86": "sashimi", "87": "scallops", "88": "seaweed_salad", "89": "shrimp_and_grits", "90": "spaghetti_bolognese", "91": "spaghetti_carbonara", "92": "spring_rolls", "93": "steak", "94": "strawberry_shortcake", "95": "sushi", "96": "tacos", "97": "takoyaki", "98": "tiramisu", "99": "tuna_tartare", "100": "waffles"}}}}], "splits": [{"name": "train", "num_bytes": 5343359.0, "num_examples": 100}], "download_size": 5256650, "dataset_size": 5343359.0}}
|
2023-05-05T15:13:57+00:00
|
5cc1609a193139167f2e59cf945e9a4bfc0e1d7c
|
# Description
## Source Datasets:
Real:
- 20,000 images - openimages_20k_test_subset
- 9,476 images - imagenette
- 9,000 images - laion_subset
- 10,000 images - wikiart (randomly sampled)
- 1250 images randomly sampled from each of the following classes:
- `["religious-painting", "flower-painting", "genre-painting", "landscape", "marina", "mythological-painting", "nude-painting-nu", "portrait"]`
- 31,531 images - coco (randomly sampled)
Fake:
- 50,000 images - diffusiondb_2m_first_50k
- 11,000 images diffusiondb_2m_first_51k_61k
- 9,000 images - sd15_train
- 9,000 images - sd20_train
- 500 images - sd15_val
- 500 images - sd20_val
Labels:
- real = 0
- fake = 1
## Sizes:
- 160,000 images
- 80,000 real
- 80,000 fake
### Avg Image sizes
- avg_width: 662.68554375
- avg_height: 613.2390375
### Split class distribution
- real_train = 64000
- fake_train = 64000
- real_test = 8000
- fake_test = 8000
- real_val = 8000
- fake_val = 8000
|
Hanneseh/MPDL_Project_1_custom_data
|
[
"region:us"
] |
2023-05-05T15:15:52+00:00
|
{}
|
2023-05-05T19:37:23+00:00
|
ee418f1b4fee2461d21fdcd3a75afcd85299e5de
|
# qmsum-cleaned
## prefixes
It's worth noting that each "document" in `input` is prefixed by a question/prompt telling the model what it is supposed to do. **You may want to handle this explicitly in some way, or prefix your models trained on this dataset.** A minimal sketch for separating the prefix is shown after the table below.
Most frequent "prefixes" separated via [sentence-splitter](https://github.com/mediacloud/sentence-splitter) in the `train` split:
| | Sentence | Count |
|---:|:------------------------------------------------------------------------------|--------:|
| 0 | Summarize the whole meeting. | 121 |
| 1 | Summarize the meeting | 25 |
| 2 | What did the team discuss about the product cost? | 4 |
| 3 | How did Marketing design the product evaluation? | 4 |
| 4 | Summarize the wrap up of the meeting. | 3 |
| 5 | What did the group discuss about user requirements of the new remote control? | 3 |
| 6 | What did the team discuss during the product evaluation? | 3 |
| 7 | Summarize the meeting. | 2 |
| 8 | Summarize what was said about digits form | 2 |
| 9 | What was discussed in the meeting? | 2 |
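A sketch for splitting off the leading prompt, assuming the first sentence returned by [sentence-splitter](https://github.com/mediacloud/sentence-splitter) is the prefix:
```python
from datasets import load_dataset
from sentence_splitter import split_text_into_sentences

ds = load_dataset("pszemraj/qmsum-cleaned", split="train")
sentences = split_text_into_sentences(ds[0]["input"], language="en")
prompt, document = sentences[0], " ".join(sentences[1:])
print(prompt)
```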
### wordcloud
Visualized as a wordcloud (`train` split):

## token counts

|
pszemraj/qmsum-cleaned
|
[
"task_categories:text2text-generation",
"task_categories:summarization",
"size_categories:1K<n<10K",
"source_datasets:tau/scrolls",
"language:en",
"license:apache-2.0",
"scrolls",
"qmsum",
"region:us"
] |
2023-05-05T15:16:33+00:00
|
{"language": ["en"], "license": "apache-2.0", "size_categories": ["1K<n<10K"], "source_datasets": "tau/scrolls", "task_categories": ["text2text-generation", "summarization"], "tags": ["scrolls", "qmsum"]}
|
2023-11-21T13:14:02+00:00
|
87aa77b833d97ead03b09f8c7682bc6040ebb5e3
|
# Dataset Card for "sam-controlnet-sprint-large-groundedDINO-mask"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
SAMControlNet/sam-controlnet-sprint-large-groundedDINO-mask
|
[
"region:us"
] |
2023-05-05T15:25:45+00:00
|
{"dataset_info": {"features": [{"name": "original_image", "dtype": "image"}, {"name": "conditioning_image", "dtype": "image"}, {"name": "caption", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1105606779.42, "num_examples": 5340}], "download_size": 879967514, "dataset_size": 1105606779.42}}
|
2023-05-05T15:38:46+00:00
|
037868b50e926a7e32f0bebe9561aab0e6ba7578
|
mclovinxie/litigiven-showcase
|
[
"license:apache-2.0",
"region:us"
] |
2023-05-05T15:50:11+00:00
|
{"license": "apache-2.0"}
|
2023-05-05T16:06:50+00:00
|
|
f3ef4752673975737a5172d8a20787c6135099c4
|
Scraped images from Furaffinity and their corresponding (preprocessed) tags. May contain adult content.
|
megatomik/FAdataset
|
[
"license:unknown",
"region:us"
] |
2023-05-05T15:55:38+00:00
|
{"license": "unknown"}
|
2023-06-10T15:28:24+00:00
|
f0b53ce42804143606e5ece56e69f1a418d58ccf
|
# Dataset Card for "deepfashion_controlnet"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
ldhnam/deepfashion_controlnet
|
[
"region:us"
] |
2023-05-05T16:22:30+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "openpose", "dtype": "image"}, {"name": "cloth", "dtype": "image"}, {"name": "caption", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3781524968.6950803, "num_examples": 13670}, {"name": "test", "num_bytes": 2489665.30491995, "num_examples": 9}], "download_size": 3766499657, "dataset_size": 3784014634.0}}
|
2023-05-05T16:25:08+00:00
|
86fd3253fe765ffc8ed7765de2fd33648d29ada6
|
# Dataset Card for "requirement-question"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
divers/requirement-question
|
[
"region:us"
] |
2023-05-05T16:44:09+00:00
|
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "index", "dtype": "int64"}, {"name": "job_requirement", "dtype": "string"}, {"name": "questions", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 35480682, "num_examples": 23237}], "download_size": 4168927, "dataset_size": 35480682}}
|
2023-05-05T16:46:15+00:00
|
94f318bc5c14668cdca339309d33f3a44f2def12
|
# Dataset Card for "jobsedcription-requirement"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
divers/jobsedcription-requirement
|
[
"region:us"
] |
2023-05-05T16:50:17+00:00
|
{"dataset_info": {"features": [{"name": "index", "dtype": "int64"}, {"name": "job_description", "dtype": "string"}, {"name": "job_requirements", "dtype": "string"}, {"name": "unknown", "dtype": "float64"}, {"name": "__index_level_0__", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 25599853, "num_examples": 4551}], "download_size": 12633905, "dataset_size": 25599853}}
|
2023-05-05T16:50:23+00:00
|
d10964c71d6ae2475e4ce2af8c9f70969cd17ba3
|
chenuneris/lora-aurora-v2
|
[
"license:apache-2.0",
"region:us"
] |
2023-05-05T17:45:43+00:00
|
{"license": "apache-2.0"}
|
2023-05-05T17:53:38+00:00
|
|
9ee95208962648b74e37f418f21cfe2f3315f0f5
|
# Non-Small Cell Lung Cancer (NSCLC-Radiomics)
The models featured in this repository use images from the publicly available [NSCLC-Radiomics](https://wiki.cancerimagingarchive.net/display/Public/NSCLC-Radiomics) dataset.
Download the data from TCIA with the **Descriptive Directory Name** download option.
## Converting Format
Convert DICOM images and segmentations to NIfTI format using [pydicom](https://pydicom.github.io/) and [pydicom-seg](https://razorx89.github.io/pydicom-seg/guides/read.html). Run:
```shell
user@machine:~/NSCLC-Radiomics-NIFTI$ python convert.py
```
## Segmentations
Each case will include one or more of the following segmentation files:
```
โ seg-Esophagus.nii.gz
โ seg-GTV-1.nii.gz
โ seg-Heart.nii.gz
โ seg-Lung-Left.nii.gz
โ seg-Lung-Right.nii.gz
โ seg-Spinal-Cord.nii.gz
```
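As a quick sanity check (a sketch; the case directory and image filename below are hypothetical and depend on how `convert.py` names its outputs), the converted volumes can be inspected with SimpleITK:
```python
import SimpleITK as sitk

# hypothetical paths produced by the conversion step
image = sitk.ReadImage("LUNG1-001/image.nii.gz")
seg = sitk.ReadImage("LUNG1-001/seg-GTV-1.nii.gz")

print("image size:", image.GetSize())
print("tumour voxels:", sitk.GetArrayFromImage(seg).sum())
```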
## Requirements
```
dicom2nifti==2.4.6
pandas==1.5.0
pydicom==2.3.1
pydicom-seg==0.4.1
SimpleITK==2.2.0
tqdm==4.64.1
```
## Citation
If using this repository, please cite the following works:
```
Data Citation
Aerts, H. J. W. L., Wee, L., Rios Velazquez, E., Leijenaar, R. T. H., Parmar, C., Grossmann, P.,
Carvalho, S., Bussink, J., Monshouwer, R., Haibe-Kains, B., Rietveld, D., Hoebers, F.,
Rietbergen, M. M., Leemans, C. R., Dekker, A., Quackenbush, J., Gillies, R. J., Lambin, P. (2019).
Data From NSCLC-Radiomics (version 4) [Data set].
The Cancer Imaging Archive.
https://doi.org/10.7937/K9/TCIA.2015.PF0M9REI
Publication Citation
Aerts, H. J. W. L., Velazquez, E. R., Leijenaar, R. T. H., Parmar, C., Grossmann, P., Carvalho, S.,
Bussink, J., Monshouwer, R., Haibe-Kains, B., Rietveld, D., Hoebers, F., Rietbergen, M. M.,
Leemans, C. R., Dekker, A., Quackenbush, J., Gillies, R. J., Lambin, P. (2014, June 3).
Decoding tumour phenotype by noninvasive imaging using a quantitative radiomics approach.
Nature Communications. Nature Publishing Group.
https://doi.org/10.1038/ncomms5006 (link)
TCIA Citation
Clark K, Vendt B, Smith K, Freymann J, Kirby J, Koppel P, Moore S, Phillips S, Maffitt D, Pringle M,
Tarbox L, Prior F.
The Cancer Imaging Archive (TCIA): Maintaining and Operating a Public Information Repository,
Journal of Digital Imaging, Volume 26, Number 6, December, 2013, pp 1045-1057.
https://doi.org/10.1007/s10278-013-9622-7
```
|
farrell236/NSCLC-Radiomics-NIFTI
|
[
"license:cc-by-3.0",
"medical",
"region:us"
] |
2023-05-05T18:27:54+00:00
|
{"license": "cc-by-3.0", "tags": ["medical"], "viewer": false}
|
2024-02-01T15:35:06+00:00
|
c40876c832174de2d72c98111c3191daf4dba7f8
|
# Dataset Card for Dolly-Odia-15K
## Dataset Description
- **Homepage: https://www.odiagenai.org/**
- **Repository: https://github.com/shantipriyap/OdiaGenAI**
- **Point of Contact: Shantipriya Parida, and Sambit Sekhar**
### Dataset Summary
This dataset is the Odia-translated version of the Dolly 15K instruction set. It contains both English and Odia instruction, input, and output strings.
### Supported Tasks and Leaderboards
Large Language Model (LLM)
### Languages
Odia
## Dataset Structure
JSON
### Data Fields
- `instruction` (string)
- `english_instruction` (string)
- `input` (string)
- `english_input` (string)
- `output` (string)
- `english_output` (string)
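A minimal loading sketch (the split name `train` is an assumption about how the JSON files are exposed):
```python
from datasets import load_dataset

dataset = load_dataset("OdiaGenAI/dolly-odia-15k")
example = dataset["train"][0]
print(example["instruction"])
print(example["english_instruction"])
```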
### Licensing Information
This work is licensed under a
[Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License][cc-by-nc-sa].
[![CC BY-NC-SA 4.0][cc-by-nc-sa-image]][cc-by-nc-sa]
[cc-by-nc-sa]: http://creativecommons.org/licenses/by-nc-sa/4.0/
[cc-by-nc-sa-image]: https://licensebuttons.net/l/by-nc-sa/4.0/88x31.png
[cc-by-nc-sa-shield]: https://img.shields.io/badge/License-CC%20BY--NC--SA%204.0-lightgrey.svg
### Citation Information
If you find this repository useful, please consider giving ⭐ and citing:
```
@misc{OdiaGenAI,
author = {Shantipriya Parida and Sambit Sekhar and Subhadarshi Panda and Soumendra Kumar Sahoo and Swateek Jena and Abhijeet Parida and Arghyadeep Sen and Satya Ranjan Dash and Deepak Kumar Pradhan},
title = {OdiaGenAI: Generative AI and LLM Initiative for the Odia Language},
year = {2023},
publisher = {Hugging Face},
journal = {Hugging Face repository},
howpublished = {\url{https://huggingface.co/OdiaGenAI}},
}
```
### Contributions
- Shantipriya Parida
- Sambit Sekhar
|
OdiaGenAI/dolly-odia-15k
|
[
"task_categories:text-generation",
"size_categories:10K<n<100K",
"language:or",
"license:cc-by-nc-sa-4.0",
"region:us"
] |
2023-05-05T18:40:08+00:00
|
{"language": ["or"], "license": "cc-by-nc-sa-4.0", "size_categories": ["10K<n<100K"], "task_categories": ["text-generation"], "pretty_name": "Dolly-Odia-15K"}
|
2023-06-05T18:21:34+00:00
|
9fd0847ef727e725218ed0c793b99648a01c712b
|
# Dataset Card for Odia_GPT-Teacher-Instruct-Odia-18K
## Dataset Description
- **Homepage: https://www.odiagenai.org/**
- **Repository: https://github.com/shantipriyap/OdiaGenAI**
- **Point of Contact: Shantipriya Parida, and Sambit Sekhar**
### Dataset Summary
This dataset is the Odia-translated version of the GPT-Teacher 18K instruction set. It contains both English and Odia instruction, input, and output strings.
### Supported Tasks and Leaderboards
Large Language Model (LLM)
### Languages
Odia
## Dataset Structure
JSON
### Data Fields
- `instruction` (string)
- `english_instruction` (string)
- `input` (string)
- `english_input` (string)
- `output` (string)
- `english_output` (string)
### Licensing Information
This work is licensed under a
[Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License][cc-by-nc-sa].
[![CC BY-NC-SA 4.0][cc-by-nc-sa-image]][cc-by-nc-sa]
[cc-by-nc-sa]: http://creativecommons.org/licenses/by-nc-sa/4.0/
[cc-by-nc-sa-image]: https://licensebuttons.net/l/by-nc-sa/4.0/88x31.png
[cc-by-nc-sa-shield]: https://img.shields.io/badge/License-CC%20BY--NC--SA%204.0-lightgrey.svg
### Citation Information
If you find this repository useful, please consider giving ⭐ and citing:
```
@misc{OdiaGenAI,
author = {Shantipriya Parida and Sambit Sekhar and Subhadarshi Panda and Soumendra Kumar Sahoo and Swateek Jena and Abhijeet Parida and Arghyadeep Sen and Satya Ranjan Dash and Deepak Kumar Pradhan},
title = {OdiaGenAI: Generative AI and LLM Initiative for the Odia Language},
year = {2023},
publisher = {Hugging Face},
journal = {Hugging Face repository},
howpublished = {\url{https://huggingface.co/OdiaGenAI}},
}
```
### Contributions
- Shantipriya Parida
- Sambit Sekhar
|
OdiaGenAI/gpt-teacher-instruct-odia-18k
|
[
"task_categories:text-generation",
"size_categories:10K<n<100K",
"language:or",
"license:cc-by-sa-4.0",
"region:us"
] |
2023-05-05T18:45:17+00:00
|
{"language": ["or"], "license": "cc-by-sa-4.0", "size_categories": ["10K<n<100K"], "task_categories": ["text-generation"], "pretty_name": "GPT-Teacher-Instruct-Odia-18K"}
|
2023-05-05T19:50:37+00:00
|
8d33eeda57429b5255615c84050ab2f4c2c56490
|
# Dataset Card for GPT-Teacher-RolePlay-Odia-3K
## Dataset Description
- **Homepage: https://www.odiagenai.org/**
- **Repository: https://github.com/shantipriyap/OdiaGenAI**
- **Point of Contact: Shantipriya Parida, and Sambit Sekhar**
### Dataset Summary
This dataset is the Odia-translated version of the GPT-Teacher-RolePlay 3K instruction set. It contains both English and Odia instruction, input, and output strings.
### Supported Tasks and Leaderboards
Large Language Model (LLM)
### Languages
Odia
## Dataset Structure
JSON
### Data Fields
- `instruction` (string)
- `english_instruction` (string)
- `input` (string)
- `english_input` (string)
- `output` (string)
- `english_output` (string)
### Licensing Information
This work is licensed under a
[Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License][cc-by-nc-sa].
[![CC BY-NC-SA 4.0][cc-by-nc-sa-image]][cc-by-nc-sa]
[cc-by-nc-sa]: http://creativecommons.org/licenses/by-nc-sa/4.0/
[cc-by-nc-sa-image]: https://licensebuttons.net/l/by-nc-sa/4.0/88x31.png
[cc-by-nc-sa-shield]: https://img.shields.io/badge/License-CC%20BY--NC--SA%204.0-lightgrey.svg
### Citation Information
If you find this repository useful, please consider giving ⭐ and citing:
```
@misc{OdiaGenAI,
author = {Shantipriya Parida and Sambit Sekhar and Subhadarshi Panda and Soumendra Kumar Sahoo and Swateek Jena and Abhijeet Parida and Arghyadeep Sen and Satya Ranjan Dash and Deepak Kumar Pradhan},
title = {OdiaGenAI: Generative AI and LLM Initiative for the Odia Language},
year = {2023},
publisher = {Hugging Face},
journal = {Hugging Face repository},
howpublished = {\url{https://huggingface.co/OdiaGenAI}},
}
```
### Contributions
- Shantipriya Parida
- Sambit Sekhar
|
OdiaGenAI/gpt-teacher-roleplay-odia-3k
|
[
"task_categories:text-generation",
"size_categories:1K<n<10K",
"language:or",
"license:cc-by-nc-sa-4.0",
"region:us"
] |
2023-05-05T18:53:25+00:00
|
{"language": ["or"], "license": "cc-by-nc-sa-4.0", "size_categories": ["1K<n<10K"], "task_categories": ["text-generation"], "pretty_name": "GPT-Teacher-RolePlay-Odia-3K"}
|
2023-05-05T19:56:24+00:00
|
f8ea1ae76e986cf58ca91d852832f5d748d043f0
|
# Dataset Card for "ift_hhrlhf_flan"
My favorite subsets of FLAN, combined with single-turn data filtered from HH-RLHF. The FLAN subsets included are:
```
flan_cats_i_like = {
"arc_challenge_10templates",
"arc_easy_10templates",
"cola_10templates",
"copa_10templates",
"coqa_10templates",
"cosmos_qa_10templates",
"fix_punct_10templates",
"math_dataset_10templates",
"natural_questions_10templates",
"openbookqa_10templates",
"squad_v2_10templates",
"trivia_qa_10templates",
}
```
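As an illustration of how such a set can be used for filtering (a sketch with a toy stand-in dataset; the `task` column name is an assumption about the FLAN dump's schema, and `flan_cats_i_like` is the set defined above):
```python
from datasets import Dataset

# toy stand-in for a FLAN dump with a per-row category column
flan = Dataset.from_dict({
    "task": ["arc_easy_10templates", "wmt16_translate_10templates"],
    "inputs": ["question one", "question two"],
})
kept = flan.filter(lambda ex: ex["task"] in flan_cats_i_like)
print(kept["task"])  # ['arc_easy_10templates']
```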
|
sam-mosaic/ift_hhrlhf_flan
|
[
"language:en",
"license:apache-2.0",
"region:us"
] |
2023-05-05T19:07:58+00:00
|
{"language": "en", "license": "apache-2.0", "dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "response", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 189653326, "num_examples": 438117}, {"name": "test", "num_bytes": 44677108, "num_examples": 116065}], "download_size": 135373906, "dataset_size": 234330434}}
|
2023-07-17T23:23:59+00:00
|
9f6d1e055fab47c2c27b0c29640636871bb8c3f2
|
KyonBS/Nishikata-TakagiSan
|
[
"license:openrail",
"region:us"
] |
2023-05-05T19:13:49+00:00
|
{"license": "openrail"}
|
2023-05-05T19:14:29+00:00
|
|
761500a76cfc7fc6594d98430e575f8f2b3c5921
|
# Dataset Card for "ffhq_controlnet_5_5_23_feat4"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
cr7Por/ffhq_controlnet_5_5_23_feat4
|
[
"region:us"
] |
2023-05-05T20:54:19+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "image_caption", "dtype": "string"}, {"name": "feat_beit", "dtype": "image"}, {"name": "feat_resnet", "dtype": "image"}, {"name": "feat_convnext", "dtype": "image"}, {"name": "feat_vitmae", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 22028080257.5, "num_examples": 39641}], "download_size": 19097660347, "dataset_size": 22028080257.5}}
|
2023-05-05T23:58:53+00:00
|
7b937efc0994ea7b2684408c64653f054bceb983
|
Astonzzh/samsum_augmented
|
[
"license:mit",
"region:us"
] |
2023-05-05T21:07:25+00:00
|
{"license": "mit", "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "dialogue", "dtype": "string"}, {"name": "summary", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9537428, "num_examples": 14732}, {"name": "test", "num_bytes": 537932, "num_examples": 819}, {"name": "validation", "num_bytes": 520047, "num_examples": 818}], "download_size": 6755468, "dataset_size": 10595407}}
|
2023-05-05T21:07:41+00:00
|
|
7bf9a4b64d1dba33cbbac51996253bb779f4b2e6
|
# Dataset Card for "yake_top3_asks_cleaned"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
dhmeltzer/yake_top3_asks_cleaned
|
[
"region:us"
] |
2023-05-05T22:08:29+00:00
|
{"dataset_info": {"features": [{"name": "title", "dtype": "string"}, {"name": "selftext", "dtype": "string"}, {"name": "answers.text", "dtype": "string"}, {"name": "answers.score", "dtype": "int64"}, {"name": "title_urls.url", "sequence": "string"}, {"name": "selftext_urls.url", "sequence": "string"}, {"name": "answers_urls.url", "sequence": "string"}, {"name": "sent_vec", "sequence": "float32"}], "splits": [{"name": "train", "num_bytes": 366313853, "num_examples": 125323}, {"name": "validation", "num_bytes": 6571413, "num_examples": 2060}, {"name": "test", "num_bytes": 12675987, "num_examples": 4058}], "download_size": 393424753, "dataset_size": 385561253}}
|
2023-05-05T22:08:53+00:00
|
6b96caf6f9d82ccad032b20a35e8729da251291e
|
# Dataset Card for "pokemon_bulbapedia_one_sentence"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
matemato/pokemon_bulbapedia_1_sentence
|
[
"region:us"
] |
2023-05-05T23:13:45+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 100688946.0, "num_examples": 721}], "download_size": 83892845, "dataset_size": 100688946.0}}
|
2023-05-07T11:54:58+00:00
|
65c2b196226ad569575492b8e41737ba8cfb7bae
|
drachmai/varied-task-concern
|
[
"license:apache-2.0",
"region:us"
] |
2023-05-05T23:57:13+00:00
|
{"license": "apache-2.0", "dataset_info": {"features": [{"name": "previous_chat", "dtype": "string"}, {"name": "last_message", "dtype": "string"}, {"name": "concerning_definitions", "dtype": "string"}, {"name": "is_concerning_score", "dtype": "float32"}], "splits": [{"name": "train", "num_bytes": 25398277, "num_examples": 22427}, {"name": "validation", "num_bytes": 3139366, "num_examples": 2803}, {"name": "test", "num_bytes": 3236027, "num_examples": 2804}], "download_size": 14288720, "dataset_size": 31773670}}
|
2023-05-06T00:04:43+00:00
|
|
02993de15491c5d57ea9f969334f634cb7b067a0
|
max_src_len = 512, max_trg_len = 256
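A sketch of how these limits might be applied during preprocessing (the `Salesforce/codet5-base` checkpoint is an assumption; the card does not name one):
```python
from transformers import RobertaTokenizer

tok = RobertaTokenizer.from_pretrained("Salesforce/codet5-base")
src = tok("package main\nfunc main() {}", max_length=512, truncation=True)      # max_src_len
trg = tok("declares an empty main function", max_length=256, truncation=True)  # max_trg_len
print(len(src["input_ids"]), len(trg["input_ids"]))
```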
|
intm/codet5_go-generation
|
[
"license:apache-2.0",
"region:us"
] |
2023-05-06T00:00:49+00:00
|
{"license": "apache-2.0"}
|
2023-05-06T00:06:07+00:00
|
83adb74218284a2c34c61f92ed8dc31407084ac4
|
---
annotations_creators:
- machine-generated
language:
- en
license:
- apache-2.0
multilinguality:
- monolingual
task_categories:
- classification
- generation
pretty_name: PubMed_ArXiv_Abstracts
features:
- name: abstr
  dtype: string
- name: title
  dtype: string
- name: journal
  dtype: string
- name: field
  dtype: string
- name: label_journal
  dtype: int64
- name: label_field
  dtype: int64
---
|
brainchalov/pubmed_arxiv_abstracts_data
|
[
"region:us"
] |
2023-05-06T00:10:14+00:00
|
{}
|
2023-05-06T01:08:45+00:00
|
c5cd9f179f327b8555e12fba16fc8e46c7fcf78e
|
Shumin001/nerf
|
[
"region:us"
] |
2023-05-06T01:07:03+00:00
|
{}
|
2023-05-06T09:42:33+00:00
|
|
c967ac7604b05b80bfa0f91e3f316d805a7a0993
|
You are probably looking for the Raven models found here: https://huggingface.co/BlinkDL/rwkv-4-raven
This is a collection of converted cpp binaries, which may be prequantized.
It is primarily used for the rwkv-cpp-node project here: https://github.com/RWKV/RWKV-cpp-node
|
picocreator/rwkv-4-cpp-quantize-bin
|
[
"license:apache-2.0",
"region:us"
] |
2023-05-06T01:12:12+00:00
|
{"license": "apache-2.0"}
|
2023-05-12T07:53:47+00:00
|
2ab500c6688da1b2818dfda8613588be70729728
|
aerinsafe/nva-PsycheTest01
|
[
"license:mit",
"region:us"
] |
2023-05-06T01:19:05+00:00
|
{"license": "mit"}
|
2023-05-06T01:20:01+00:00
|
|
c5bd600354cc9267c757c8a4a241ebbf2baf6f02
|
foilfoilfoil/GGB-Discord-Data-top-6
|
[
"license:other",
"region:us"
] |
2023-05-06T02:30:56+00:00
|
{"license": "other"}
|
2023-05-06T02:31:13+00:00
|
|
65fd368b73f85a4bf561204033da817750a7f66a
|
rohk/tycho-catalog
|
[
"license:cc0-1.0",
"region:us"
] |
2023-05-06T02:54:22+00:00
|
{"license": "cc0-1.0"}
|
2023-05-06T02:57:29+00:00
|
|
d2369489c1ef9b2b1cc8027d1307c2f66e02d2c6
|
rohk/hipparcos-catalog
|
[
"license:cc0-1.0",
"region:us"
] |
2023-05-06T02:59:52+00:00
|
{"license": "cc0-1.0"}
|
2023-05-06T03:00:49+00:00
|
|
5a9eca1999b46405071f616a7a9f2b3fd83f1a16
|
# CC-100 zh-Hant (Traditional Chinese)
From https://data.statmt.org/cc-100/, only zh-Hant - Chinese (Traditional). Broken into paragraphs, with each paragraph as a row.
Estimated to have around 4B tokens when tokenized with the [`bigscience/bloom`](https://huggingface.co/bigscience/bloom) tokenizer.
There's another version in which the text is split by lines instead of paragraphs: [`zetavg/CC-100-zh-Hant`](https://huggingface.co/datasets/zetavg/CC-100-zh-Hant).
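A minimal loading sketch (streaming is assumed to be the practical option, given the ~13 GB download):
```python
from datasets import load_dataset

ds = load_dataset("zetavg/CC-100-zh-Hant-merged", split="train", streaming=True)
for row in ds.take(3):  # each row is one paragraph under the `content` field
    print(row["content"][:80])
```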
## References
Please cite the following if you found the resources in the CC-100 corpus useful.
* **Unsupervised Cross-lingual Representation Learning at Scale**, *Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmรกn, Edouard Grave, Myle Ott, Luke Zettlemoyer, Veselin Stoyanov*, Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics (ACL), p. 8440-8451, July 2020, [pdf](https://www.aclweb.org/anthology/2020.acl-main.747.pdf), [bib](https://www.aclweb.org/anthology/2020.acl-main.747.bib) .
* **CCNet: Extracting High Quality Monolingual Datasets from Web Crawl Data**, *Guillaume Wenzek, Marie-Anne Lachaux, Alexis Conneau, Vishrav Chaudhary, Francisco Guzmรกn, Armand Joulin, Edouard Grave*, Proceedings of the 12th Language Resources and Evaluation Conference (LREC), p. 4003-4012, May 2020, [pdf](https://www.aclweb.org/anthology/2020.lrec-1.494.pdf), [bib](https://www.aclweb.org/anthology/2020.lrec-1.494.bib).
|
zetavg/CC-100-zh-Hant-merged
|
[
"region:us"
] |
2023-05-06T03:28:11+00:00
|
{"dataset_info": {"features": [{"name": "content", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 17882150544, "num_examples": 12328228}], "download_size": 12940914691, "dataset_size": 17882150544}}
|
2023-05-06T10:11:44+00:00
|
0fb943f45c3bfbce6dd7bb667f57ffa9f90b9208
|
# Dataset Card for "my_controlnet_feat4"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
cr7Por/my_controlnet_feat4
|
[
"region:us"
] |
2023-05-06T03:56:55+00:00
|
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "image_caption", "dtype": "string"}, {"name": "feat_beit", "dtype": "image"}, {"name": "feat_resnet", "dtype": "image"}, {"name": "feat_convnext", "dtype": "image"}, {"name": "feat_vitmae", "dtype": "image"}, {"name": "validline", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 171271858.0, "num_examples": 435}], "download_size": 170826843, "dataset_size": 171271858.0}}
|
2023-05-06T03:57:55+00:00
|
958cd8527995b51ba8a26f2b1471dc133950223a
|
# Dataset Card for "wiki_books2048"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
jxie/wiki_books2048
|
[
"region:us"
] |
2023-05-06T04:08:34+00:00
|
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 22313570880, "num_examples": 11350967}], "download_size": 13120075599, "dataset_size": 22313570880}}
|
2023-05-06T04:28:44+00:00
|
0e840ee86ffe678b0a774da3c906e119cbb173a9
|
IdaLee/IdaDB
|
[
"task_categories:text-classification",
"language:zh",
"license:mit",
"finance",
"region:us"
] |
2023-05-06T04:57:27+00:00
|
{"language": ["zh"], "license": "mit", "task_categories": ["text-classification"], "pretty_name": "IdaDB", "tags": ["finance"]}
|
2023-05-06T04:58:38+00:00
|
|
12d83cf5631e642d3aeba35f4cc0e344eca1b027
|
(Works with [Mobile-Env v3.x](https://github.com/X-LANCE/Mobile-Env/tree/v3.0).)
# WikiHow Task Set
WikiHow task set is an InfoUI interaction task set based on
[Mobile-Env](https://github.com/X-LANCE/Mobile-Env) proposed in [*Mobile-Env:
An Evaluation Platform and Benchmark for Interactive Agents in LLM
Era*](https://arxiv.org/abs/2305.08144).
[WikiHow](https://www.wikihow.com/Main-Page) is a collaborative wiki site about
various real-life tips with more than 340,000 online articles. To construct the
task set, 107,448 pages are crawled, and the dumped website data occupy about
88 GiB totally.
Several task definition templates are designed according to the functions of
WikiHow app and task definitions are instantiated through the template toolkit
in Mobile-Env. 577 tasks are sampled from the extended set, which is named the
*canonical set* (`wikihow-canonical.tar.xz`). Owing to budget limits, only 150
tasks are tested using the proposed LLM-based agent. These
150 tasks are given in `wikihow-microcanon.tar.xz`. We call it the *canonical
subset* or the *micro canonical set*.
### Website Data Replay
The replay script for [mitmproxy](https://mitmproxy.org/) is given as
`replay_url.py`. To use this replay script, the information retrieval tool
[Pyserini](https://github.com/castorini/pyserini/) is required. Four parameters
are expected to be assigned in the script:
+ The crawled data from WikiHow website (`dumps` in `wikihow.data.tar.xz`)
+ The HTML templates used to mock the search result page (`templates` in
`wikihow.data.tar.xz`)
+ The indices for the search engine based on Pyserini (`indices-t/indices` in
`wikihow.data.tar.xz`)
+ The metadata of the crawled articles (`indices-t/docs/doc_meta.csv` in
`wikihow.data.tar.xz`)
All the required data are offered in `wikihow.data.tar.xz`. (The archive is
about 78 GiB; the decompressed data are about 88 GiB.) The archive is split
into two pieces (`wikihow.data.tar.xz.00` and `wikihow.data.tar.xz.01`). You
can use `cat` to concatenate them:
```sh
cat wikihow.data.tar.xz.00 wikihow.data.tar.xz.01 >wikihow.data.tar.xz
```
The SHA256 checksums are provided in `wikihow.data.tar.xz.sha256` to check the
integrity.
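For example, the archives can be verified with:
```sh
sha256sum -c wikihow.data.tar.xz.sha256
```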
To run the script:
```sh
mitmproxy --showhost -s replay_url.py
```
### Certificate Unpinning Plan
The `syscert` plan proposed by Mobile-Env works for WikiHow app. You can
complete the config according to the [guideline of
Mobile-Env](https://github.com/X-LANCE/Mobile-Env/blob/master/docs/dynamic-app-en.md).
The available APK package from [APKCombo](https://apkcombo.com/) is provided.
And note to use the AVD image of version Android 11.0 (API Level 30) (Google
APIs) to obtain the best compatibility and the root-enabled ADBD.
### Human-Rewritten Instructions
Human-rewritten instructions for the *canonical set* are released under
`instruction_rewriting/`. An AndroidEnv wrapper `InstructionRewritingWrapper`
is provided to load the rewritten instructions (`merged_doccano.json`) and
public patterns (`pattern-*.txt`). The annotations are collected via
[doccano](https://doccano.github.io/doccano/). The patterns are parsed by
[`sentence_pattern.py`](instruction_rewriting/sentence_pattern.py).
|
zdy023/WikiHow-taskset
|
[
"license:apache-2.0",
"arxiv:2305.08144",
"region:us"
] |
2023-05-06T05:01:34+00:00
|
{"license": "apache-2.0"}
|
2023-12-18T12:29:31+00:00
|
b01fb013d5595abe18c97408dd2fc939e4f7ea3f
|
# Dataset Card for Huatuo_knowledge_graph_qa
## Dataset Description
- **Homepage: https://www.huatuogpt.cn/**
- **Repository: https://github.com/FreedomIntelligence/HuatuoGPT**
- **Paper: https://arxiv.org/abs/2305.01526**
- **Leaderboard:**
- **Point of Contact:**
### Dataset Summary
We built this QA dataset from a medical knowledge graph, with a total of 798,444 entries; the questions are constructed from templates, and the answers are the contents of the corresponding knowledge-graph entries.
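A minimal loading sketch with the `datasets` library (the split and field layout is not documented in this card, so inspect the returned object):
```python
from datasets import load_dataset

dataset = load_dataset("FreedomIntelligence/huatuo_knowledge_graph_qa")
print(dataset)  # shows available splits and fields
```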
## Dataset Creation
### Source Data
https://cpubmed.openi.org.cn/graph/wiki
https://github.com/zhihao-chen/QASystemOnMedicalGraph
https://github.com/baiyang2464/chatbot-base-on-Knowledge-Graph
## Citation
```
@misc{li2023huatuo26m,
title={Huatuo-26M, a Large-scale Chinese Medical QA Dataset},
author={Jianquan Li and Xidong Wang and Xiangbo Wu and Zhiyi Zhang and Xiaolong Xu and Jie Fu and Prayag Tiwari and Xiang Wan and Benyou Wang},
year={2023},
eprint={2305.01526},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
|
FreedomIntelligence/huatuo_knowledge_graph_qa
|
[
"task_categories:text-generation",
"size_categories:100K<n<1M",
"language:zh",
"license:apache-2.0",
"medical",
"arxiv:2305.01526",
"region:us"
] |
2023-05-06T05:35:38+00:00
|
{"language": ["zh"], "license": "apache-2.0", "size_categories": ["100K<n<1M"], "task_categories": ["text-generation"], "tags": ["medical"]}
|
2023-07-07T07:46:58+00:00
|
338fe1ab5881c216526a906f417cdb588bff517c
|
# Dataset Card for "final_train_v2_30000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mHossain/final_train_v2_30000
|
[
"region:us"
] |
2023-05-06T05:47:30+00:00
|
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "input_text", "dtype": "string"}, {"name": "target_text", "dtype": "string"}, {"name": "prefix", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9099467.1, "num_examples": 27000}, {"name": "test", "num_bytes": 1011051.9, "num_examples": 3000}], "download_size": 4433108, "dataset_size": 10110519.0}}
|
2023-05-06T05:47:34+00:00
|
37e39d69bba67ec6228688d18450eede2b1c1ba3
|
# Dataset Card for "final_train_v2_60000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mHossain/final_train_v2_60000
|
[
"region:us"
] |
2023-05-06T05:47:35+00:00
|
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "input_text", "dtype": "string"}, {"name": "target_text", "dtype": "string"}, {"name": "prefix", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9124491.6, "num_examples": 27000}, {"name": "test", "num_bytes": 1013832.4, "num_examples": 3000}], "download_size": 4445739, "dataset_size": 10138324.0}}
|
2023-05-06T05:47:38+00:00
|
8af75d493a24255a629bea643bd746fc14109841
|
# Dataset Card for "final_train_v2_90000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mHossain/final_train_v2_90000
|
[
"region:us"
] |
2023-05-06T05:47:39+00:00
|
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "input_text", "dtype": "string"}, {"name": "target_text", "dtype": "string"}, {"name": "prefix", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9136964.7, "num_examples": 27000}, {"name": "test", "num_bytes": 1015218.3, "num_examples": 3000}], "download_size": 4451000, "dataset_size": 10152183.0}}
|
2023-05-06T05:47:43+00:00
|
3926d3856932c058e39d36e25bdfa0aead2b8ca4
|
# Dataset Card for "final_train_v2_120000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mHossain/final_train_v2_120000
|
[
"region:us"
] |
2023-05-06T05:47:44+00:00
|
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "input_text", "dtype": "string"}, {"name": "target_text", "dtype": "string"}, {"name": "prefix", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9133587.0, "num_examples": 27000}, {"name": "test", "num_bytes": 1014843.0, "num_examples": 3000}], "download_size": 4454698, "dataset_size": 10148430.0}}
|
2023-05-06T05:47:47+00:00
|
b96f935778c2b41d67c40d65577ae3234b4678fa
|
# Dataset Card for "final_train_v2_150000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mHossain/final_train_v2_150000
|
[
"region:us"
] |
2023-05-06T05:47:48+00:00
|
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "input_text", "dtype": "string"}, {"name": "target_text", "dtype": "string"}, {"name": "prefix", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9145459.8, "num_examples": 27000}, {"name": "test", "num_bytes": 1016162.2, "num_examples": 3000}], "download_size": 4456343, "dataset_size": 10161622.0}}
|
2023-05-06T05:47:52+00:00
|
91735712b0ff6bb30b1341fbe6ff3f5ca1e02dd7
|
# Dataset Card for "final_train_v2_180000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mHossain/final_train_v2_180000
|
[
"region:us"
] |
2023-05-06T05:47:53+00:00
|
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "input_text", "dtype": "string"}, {"name": "target_text", "dtype": "string"}, {"name": "prefix", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9137039.4, "num_examples": 27000}, {"name": "test", "num_bytes": 1015226.6, "num_examples": 3000}], "download_size": 4451731, "dataset_size": 10152266.0}}
|
2023-05-06T05:47:57+00:00
|
55a9f052fa46a100dc079fe4e333fde7baf027a5
|
# Dataset Card for "final_train_v2_210000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mHossain/final_train_v2_210000
|
[
"region:us"
] |
2023-05-06T05:47:58+00:00
|
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "input_text", "dtype": "string"}, {"name": "target_text", "dtype": "string"}, {"name": "prefix", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9130050.9, "num_examples": 27000}, {"name": "test", "num_bytes": 1014450.1, "num_examples": 3000}], "download_size": 4448876, "dataset_size": 10144501.0}}
|
2023-05-06T05:48:01+00:00
|
56dc1356c57219ac6fb93218fa9ff5e5003fa86e
|
# Dataset Card for "final_train_v2_240000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mHossain/final_train_v2_240000
|
[
"region:us"
] |
2023-05-06T05:48:02+00:00
|
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "input_text", "dtype": "string"}, {"name": "target_text", "dtype": "string"}, {"name": "prefix", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9143957.7, "num_examples": 27000}, {"name": "test", "num_bytes": 1015995.3, "num_examples": 3000}], "download_size": 4457503, "dataset_size": 10159953.0}}
|
2023-05-06T05:48:06+00:00
|
5025b313b9ba92f8cab82c06d4abbdaca6dc401b
|
# Dataset Card for "final_train_v2_270000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mHossain/final_train_v2_270000
|
[
"region:us"
] |
2023-05-06T05:48:07+00:00
|
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "input_text", "dtype": "string"}, {"name": "target_text", "dtype": "string"}, {"name": "prefix", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9136682.1, "num_examples": 27000}, {"name": "test", "num_bytes": 1015186.9, "num_examples": 3000}], "download_size": 4450927, "dataset_size": 10151869.0}}
|
2023-05-06T05:48:10+00:00
|
bb26f8a3d337e4e10ffcf16b7aac3315712c590e
|
# Dataset Card for "final_train_v2_300000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mHossain/final_train_v2_300000
|
[
"region:us"
] |
2023-05-06T05:48:11+00:00
|
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "input_text", "dtype": "string"}, {"name": "target_text", "dtype": "string"}, {"name": "prefix", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9152502.3, "num_examples": 27000}, {"name": "test", "num_bytes": 1016944.7, "num_examples": 3000}], "download_size": 4455484, "dataset_size": 10169447.0}}
|
2023-05-06T05:48:15+00:00
|
82988e20897edc53e556de4dc264aa1e6ce23e85
|
# Dataset Card for "final_train_v2_330000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mHossain/final_train_v2_330000
|
[
"region:us"
] |
2023-05-06T05:48:16+00:00
|
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "input_text", "dtype": "string"}, {"name": "target_text", "dtype": "string"}, {"name": "prefix", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9121966.2, "num_examples": 27000}, {"name": "test", "num_bytes": 1013551.8, "num_examples": 3000}], "download_size": 4447462, "dataset_size": 10135518.0}}
|
2023-05-06T05:48:20+00:00
|
bb52000a759d74cc6acb76a08fa24822692c2b51
|
# Dataset Card for "final_train_v2_360000"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
|
mHossain/final_train_v2_360000
|
[
"region:us"
] |
2023-05-06T05:48:21+00:00
|
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "input_text", "dtype": "string"}, {"name": "target_text", "dtype": "string"}, {"name": "prefix", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9145644.3, "num_examples": 27000}, {"name": "test", "num_bytes": 1016182.7, "num_examples": 3000}], "download_size": 4459398, "dataset_size": 10161827.0}}
|
2023-05-06T05:48:25+00:00
|