| Column | Type | Range / Values |
| --- | --- | --- |
| modelId | stringlengths | 5 to 139 |
| author | stringlengths | 2 to 42 |
| last_modified | timestamp[us, tz=UTC] | 2020-02-15 11:33:14 to 2025-09-22 00:45:16 |
| downloads | int64 | 0 to 223M |
| likes | int64 | 0 to 11.7k |
| library_name | stringclasses | 570 values |
| tags | listlengths | 1 to 4.05k |
| pipeline_tag | stringclasses | 55 values |
| createdAt | timestamp[us, tz=UTC] | 2022-03-02 23:29:04 to 2025-09-22 00:43:28 |
| card | stringlengths | 11 to 1.01M |
jonatasgrosman/exp_w2v2t_en_vp-fr_s51
jonatasgrosman
2022-07-08T07:29:19Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "en", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-08T07:28:38Z
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_en_vp-fr_s51 Fine-tuned [facebook/wav2vec2-large-fr-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-fr-voxpopuli) for speech recognition on English using the train split of [Common Voice 7.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
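The cards in this series note the 16 kHz sampling requirement but include no inference snippet; a minimal sketch, assuming the standard transformers ASR pipeline and a hypothetical audio file path, might look like this:

```python
# Hedged sketch (not from the card): run the checkpoint through the standard
# transformers automatic-speech-recognition pipeline at the required 16 kHz.
import librosa
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="jonatasgrosman/exp_w2v2t_en_vp-fr_s51",
)

# "speech.wav" is a placeholder path; librosa resamples the audio to 16 kHz.
audio, _ = librosa.load("speech.wav", sr=16_000)
print(asr(audio)["text"])
```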
jonatasgrosman/exp_w2v2t_en_vp-fr_s691
jonatasgrosman
2022-07-08T07:20:48Z
5
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "en", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-08T07:20:01Z
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_en_vp-fr_s691 Fine-tuned [facebook/wav2vec2-large-fr-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-fr-voxpopuli) for speech recognition on English using the train split of [Common Voice 7.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_en_vp-fr_s118
jonatasgrosman
2022-07-08T07:12:26Z
4
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "en", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-08T07:12:00Z
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_en_vp-fr_s118 Fine-tuned [facebook/wav2vec2-large-fr-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-fr-voxpopuli) for speech recognition on English using the train split of [Common Voice 7.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_en_unispeech-ml_s756
jonatasgrosman
2022-07-08T07:05:35Z
4
0
transformers
[ "transformers", "pytorch", "unispeech", "automatic-speech-recognition", "en", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-08T07:04:52Z
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_en_unispeech-ml_s756 Fine-tuned [microsoft/unispeech-large-multi-lingual-1500h-cv](https://huggingface.co/microsoft/unispeech-large-multi-lingual-1500h-cv) for speech recognition on English using the train split of [Common Voice 7.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_en_unispeech-ml_s377
jonatasgrosman
2022-07-08T06:52:52Z
3
0
transformers
[ "transformers", "pytorch", "unispeech", "automatic-speech-recognition", "en", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-08T06:52:07Z
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_en_unispeech-ml_s377 Fine-tuned [microsoft/unispeech-large-multi-lingual-1500h-cv](https://huggingface.co/microsoft/unispeech-large-multi-lingual-1500h-cv) for speech recognition on English using the train split of [Common Voice 7.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_en_wavlm_s767
jonatasgrosman
2022-07-08T06:33:36Z
3
0
transformers
[ "transformers", "pytorch", "wavlm", "automatic-speech-recognition", "en", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-08T06:32:43Z
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_en_wavlm_s767 Fine-tuned [microsoft/wavlm-large](https://huggingface.co/microsoft/wavlm-large) for speech recognition on English using the train split of [Common Voice 7.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_en_vp-sv_s179
jonatasgrosman
2022-07-08T06:02:23Z
4
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "en", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-08T06:01:42Z
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_en_vp-sv_s179 Fine-tuned [facebook/wav2vec2-large-sv-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-sv-voxpopuli) for speech recognition on English using the train split of [Common Voice 7.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_en_hubert_s877
jonatasgrosman
2022-07-08T05:55:00Z
4
0
transformers
[ "transformers", "pytorch", "hubert", "automatic-speech-recognition", "en", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-08T05:54:23Z
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_en_hubert_s877 Fine-tuned [facebook/hubert-large-ll60k](https://huggingface.co/facebook/hubert-large-ll60k) for speech recognition on English using the train split of [Common Voice 7.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_en_hubert_s596
jonatasgrosman
2022-07-08T05:50:29Z
3
0
transformers
[ "transformers", "pytorch", "hubert", "automatic-speech-recognition", "en", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-08T05:49:43Z
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_en_hubert_s596 Fine-tuned [facebook/hubert-large-ll60k](https://huggingface.co/facebook/hubert-large-ll60k) for speech recognition on English using the train split of [Common Voice 7.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_en_unispeech_s809
jonatasgrosman
2022-07-08T05:41:57Z
5
0
transformers
[ "transformers", "pytorch", "unispeech", "automatic-speech-recognition", "en", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-08T05:41:08Z
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_en_unispeech_s809 Fine-tuned [microsoft/unispeech-large-1500h-cv](https://huggingface.co/microsoft/unispeech-large-1500h-cv) for speech recognition on English using the train split of [Common Voice 7.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_en_unispeech_s870
jonatasgrosman
2022-07-08T05:31:32Z
4
0
transformers
[ "transformers", "pytorch", "unispeech", "automatic-speech-recognition", "en", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-08T05:30:42Z
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_en_unispeech_s870 Fine-tuned [microsoft/unispeech-large-1500h-cv](https://huggingface.co/microsoft/unispeech-large-1500h-cv) for speech recognition on English using the train split of [Common Voice 7.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_en_xlsr-53_s279
jonatasgrosman
2022-07-08T05:26:47Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "en", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-08T05:26:21Z
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_en_xlsr-53_s279 Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) for speech recognition on English using the train split of [Common Voice 7.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_en_xlsr-53_s769
jonatasgrosman
2022-07-08T05:19:10Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "en", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-08T05:18:22Z
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_en_xlsr-53_s769 Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) for speech recognition on English using the train split of [Common Voice 7.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_en_xlsr-53_s870
jonatasgrosman
2022-07-08T05:07:22Z
4
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "en", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-08T05:06:55Z
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_en_xlsr-53_s870 Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) for speech recognition on English using the train split of [Common Voice 7.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_en_vp-100k_s421
jonatasgrosman
2022-07-08T04:43:53Z
5
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "en", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-08T04:43:09Z
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_en_vp-100k_s421 Fine-tuned [facebook/wav2vec2-large-100k-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-100k-voxpopuli) for speech recognition on English using the train split of [Common Voice 7.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
rserenity/shuukobot
rserenity
2022-07-08T04:38:26Z
0
0
null
[ "tensorboard", "text-generation", "region:us" ]
text-generation
2022-07-08T02:58:22Z
--- tags: - text-generation ---
jonatasgrosman/exp_w2v2t_en_vp-100k_s807
jonatasgrosman
2022-07-08T04:33:29Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "en", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-08T04:32:40Z
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_en_vp-100k_s807 Fine-tuned [facebook/wav2vec2-large-100k-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-100k-voxpopuli) for speech recognition on English using the train split of [Common Voice 7.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
eplatas/distilroberta-base-finetuned-wikitext2
eplatas
2022-07-08T01:58:11Z
4
0
transformers
[ "transformers", "pytorch", "tensorboard", "roberta", "fill-mask", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
2022-07-08T01:52:09Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilroberta-base-finetuned-wikitext2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilroberta-base-finetuned-wikitext2 This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.8359 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 87 | 1.9893 | | No log | 2.0 | 174 | 1.9055 | | No log | 3.0 | 261 | 1.8187 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
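The card lists the fine-tuning hyperparameters and evaluation loss but no usage snippet; a minimal fill-mask sketch (an assumption, not part of the card) would be:

```python
# Hedged usage sketch: query the fine-tuned masked-language model.
# DistilRoBERTa uses "<mask>" as its mask token.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="eplatas/distilroberta-base-finetuned-wikitext2")
for prediction in fill_mask("The capital of France is <mask>."):
    print(prediction["token_str"], round(prediction["score"], 4))
```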
tfshaman/distilbert-base-uncased-finetuned-clinc
tfshaman
2022-07-07T22:15:13Z
7
0
transformers
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "dataset:clinc_oos", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2022-07-07T21:36:11Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - clinc_oos metrics: - accuracy model-index: - name: distilbert-base-uncased-finetuned-clinc results: - task: name: Text Classification type: text-classification dataset: name: clinc_oos type: clinc_oos args: plus metrics: - name: Accuracy type: accuracy value: 0.9158064516129032 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-clinc This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the clinc_oos dataset. It achieves the following results on the evaluation set: - Loss: 0.7786 - Accuracy: 0.9158 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 48 - eval_batch_size: 48 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 4.2838 | 1.0 | 318 | 3.2787 | 0.7455 | | 2.622 | 2.0 | 636 | 1.8706 | 0.8332 | | 1.5466 | 3.0 | 954 | 1.1623 | 0.8939 | | 1.0135 | 4.0 | 1272 | 0.8619 | 0.91 | | 0.7985 | 5.0 | 1590 | 0.7786 | 0.9158 | ### Framework versions - Transformers 4.21.0.dev0 - Pytorch 1.12.0 - Datasets 2.3.2 - Tokenizers 0.12.1
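The clinc_oos card reports 0.9158 accuracy but omits inference code; a hedged sketch using the text-classification pipeline (the example utterance is invented) is:

```python
# Hedged usage sketch: intent classification with the fine-tuned checkpoint.
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="tfshaman/distilbert-base-uncased-finetuned-clinc",
)
# Returns the predicted CLINC intent label and its score.
print(classifier("how do i transfer money into my savings account"))
```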
osanseviero/en_core_web_sm
osanseviero
2022-07-07T21:29:21Z
6
0
spacy
[ "spacy", "token-classification", "en", "license:mit", "model-index", "region:us" ]
token-classification
2022-07-07T21:28:43Z
--- tags: - spacy - token-classification language: - en license: mit model-index: - name: en_core_web_sm results: - task: name: NER type: token-classification metrics: - name: NER Precision type: precision value: 0.8508041869 - name: NER Recall type: recall value: 0.8344851763 - name: NER F Score type: f_score value: 0.8425656714 - task: name: TAG type: token-classification metrics: - name: TAG (XPOS) Accuracy type: accuracy value: 0.9726545475 - task: name: UNLABELED_DEPENDENCIES type: token-classification metrics: - name: Unlabeled Attachment Score (UAS) type: f_score value: 0.9180803841 - task: name: LABELED_DEPENDENCIES type: token-classification metrics: - name: Labeled Attachment Score (LAS) type: f_score value: 0.8996666011 - task: name: SENTS type: token-classification metrics: - name: Sentences F-Score type: f_score value: 0.9060200669 --- ### Details: https://spacy.io/models/en#en_core_web_sm English pipeline optimized for CPU. Components: tok2vec, tagger, parser, senter, ner, attribute_ruler, lemmatizer. | Feature | Description | | --- | --- | | **Name** | `en_core_web_sm` | | **Version** | `3.3.0` | | **spaCy** | `>=3.3.0.dev0,<3.4.0` | | **Default Pipeline** | `tok2vec`, `tagger`, `parser`, `attribute_ruler`, `lemmatizer`, `ner` | | **Components** | `tok2vec`, `tagger`, `parser`, `senter`, `attribute_ruler`, `lemmatizer`, `ner` | | **Vectors** | 0 keys, 0 unique vectors (0 dimensions) | | **Sources** | [OntoNotes 5](https://catalog.ldc.upenn.edu/LDC2013T19) (Ralph Weischedel, Martha Palmer, Mitchell Marcus, Eduard Hovy, Sameer Pradhan, Lance Ramshaw, Nianwen Xue, Ann Taylor, Jeff Kaufman, Michelle Franchini, Mohammed El-Bachouti, Robert Belvin, Ann Houston)<br />[ClearNLP Constituent-to-Dependency Conversion](https://github.com/clir/clearnlp-guidelines/blob/master/md/components/dependency_conversion.md) (Emory University)<br />[WordNet 3.0](https://wordnet.princeton.edu/) (Princeton University) | | **License** | `MIT` | | **Author** | [Explosion](https://explosion.ai) | ### Label Scheme <details> <summary>View label scheme (112 labels for 3 components)</summary> | Component | Labels | | --- | --- | | **`tagger`** | `$`, `''`, `,`, `-LRB-`, `-RRB-`, `.`, `:`, `ADD`, `AFX`, `CC`, `CD`, `DT`, `EX`, `FW`, `HYPH`, `IN`, `JJ`, `JJR`, `JJS`, `LS`, `MD`, `NFP`, `NN`, `NNP`, `NNPS`, `NNS`, `PDT`, `POS`, `PRP`, `PRP$`, `RB`, `RBR`, `RBS`, `RP`, `SYM`, `TO`, `UH`, `VB`, `VBD`, `VBG`, `VBN`, `VBP`, `VBZ`, `WDT`, `WP`, `WP$`, `WRB`, `XX`, ```` | | **`parser`** | `ROOT`, `acl`, `acomp`, `advcl`, `advmod`, `agent`, `amod`, `appos`, `attr`, `aux`, `auxpass`, `case`, `cc`, `ccomp`, `compound`, `conj`, `csubj`, `csubjpass`, `dative`, `dep`, `det`, `dobj`, `expl`, `intj`, `mark`, `meta`, `neg`, `nmod`, `npadvmod`, `nsubj`, `nsubjpass`, `nummod`, `oprd`, `parataxis`, `pcomp`, `pobj`, `poss`, `preconj`, `predet`, `prep`, `prt`, `punct`, `quantmod`, `relcl`, `xcomp` | | **`ner`** | `CARDINAL`, `DATE`, `EVENT`, `FAC`, `GPE`, `LANGUAGE`, `LAW`, `LOC`, `MONEY`, `NORP`, `ORDINAL`, `ORG`, `PERCENT`, `PERSON`, `PRODUCT`, `QUANTITY`, `TIME`, `WORK_OF_ART` | </details> ### Accuracy | Type | Score | | --- | --- | | `TOKEN_ACC` | 99.93 | | `TOKEN_P` | 99.57 | | `TOKEN_R` | 99.58 | | `TOKEN_F` | 99.57 | | `TAG_ACC` | 97.27 | | `SENTS_P` | 91.89 | | `SENTS_R` | 89.35 | | `SENTS_F` | 90.60 | | `DEP_UAS` | 91.81 | | `DEP_LAS` | 89.97 | | `ENTS_P` | 85.08 | | `ENTS_R` | 83.45 | | `ENTS_F` | 84.26 |
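The spaCy card documents the pipeline components and label scheme but not how to call them; a minimal usage sketch (assuming the package is installed locally) is:

```python
# Hedged sketch: load the pipeline and run the components listed in the card
# (tagger, parser, NER); the sample sentence is invented.
import spacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("Apple is looking at buying a U.K. startup for $1 billion.")
print([(token.text, token.tag_, token.dep_) for token in doc])
print([(ent.text, ent.label_) for ent in doc.ents])
```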
phyous/q-FrozenLake-v1-4x4-noSlippery
phyous
2022-07-07T20:31:38Z
0
0
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-07-07T20:31:33Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1** . ## Usage ```python model = load_from_hub(repo_id="phyous/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
Forkits/Reinforce-CartPole
Forkits
2022-07-07T20:30:43Z
0
0
null
[ "CartPole-v1", "reinforce", "reinforcement-learning", "custom-implementation", "deep-rl-class", "model-index", "region:us" ]
reinforcement-learning
2022-07-06T21:06:43Z
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-CartPole results: - metrics: - type: mean_reward value: 95.30 +/- 33.98 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 5 of the Deep Reinforcement Learning Class: https://github.com/huggingface/deep-rl-class/tree/main/unit5
huggingtweets/mcconaughey
huggingtweets
2022-07-07T19:10:58Z
5
0
transformers
[ "transformers", "pytorch", "gpt2", "text-generation", "huggingtweets", "en", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2022-07-07T19:10:26Z
--- language: en thumbnail: http://www.huggingtweets.com/mcconaughey/1657221054082/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1191381171164237824/jdS95Rtm_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Matthew McConaughey</div> <div style="text-align: center; font-size: 14px;">@mcconaughey</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Matthew McConaughey. | Data | Matthew McConaughey | | --- | --- | | Tweets downloaded | 2519 | | Retweets | 595 | | Short tweets | 264 | | Tweets kept | 1660 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/3cksy9wk/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @mcconaughey's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/3hgi91kg) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/3hgi91kg/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/mcconaughey') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
huggingtweets/technothepig
huggingtweets
2022-07-07T19:01:06Z
5
0
transformers
[ "transformers", "pytorch", "gpt2", "text-generation", "huggingtweets", "en", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2022-03-02T23:29:05Z
--- language: en thumbnail: http://www.huggingtweets.com/technothepig/1657220462442/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1284959902671093761/tLN43QKJ_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Technoblade</div> <div style="text-align: center; font-size: 14px;">@technothepig</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Technoblade. | Data | Technoblade | | --- | --- | | Tweets downloaded | 1448 | | Retweets | 172 | | Short tweets | 299 | | Tweets kept | 977 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/38ipidr1/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @technothepig's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/1x797ecq) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/1x797ecq/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/technothepig') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Mascariddu8/distilbert-base-uncased-finetuned-imdb
Mascariddu8
2022-07-07T17:47:28Z
4
0
transformers
[ "transformers", "pytorch", "tensorboard", "distilbert", "fill-mask", "generated_from_trainer", "dataset:imdb", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
2022-07-07T17:34:16Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb model-index: - name: distilbert-base-uncased-finetuned-imdb results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-imdb This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 2.4721 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.7086 | 1.0 | 157 | 2.4897 | | 2.5796 | 2.0 | 314 | 2.4230 | | 2.5269 | 3.0 | 471 | 2.4354 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
kurianbenoy/paddy_convnext_model
kurianbenoy
2022-07-07T17:36:01Z
0
0
fastai
[ "fastai", "image-classification", "license:mit", "region:us" ]
image-classification
2022-06-20T13:42:41Z
--- license: mit tags: - fastai - image-classification --- # Model card ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed
juanna/gptdc
juanna
2022-07-07T15:13:14Z
4
0
transformers
[ "transformers", "pytorch", "gpt2", "text-generation", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2022-07-07T11:22:26Z
gptdc, created by SKT, is trained using the Ainize service and simulated on Hugging Face.
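The card gives no usage code; since the tags mark this as a GPT-2 text-generation checkpoint, a hedged sketch (the Korean prompt is an invented example) is:

```python
# Hedged usage sketch: Korean text generation with the gptdc checkpoint.
from transformers import pipeline

generator = pipeline("text-generation", model="juanna/gptdc")
# "오늘 날씨가" ("Today's weather is") is just a sample prompt.
print(generator("오늘 날씨가", max_length=50, num_return_sequences=1))
```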
gemasphi/laprador_trained
gemasphi
2022-07-07T14:25:10Z
1
0
sentence-transformers
[ "sentence-transformers", "pytorch", "distilbert", "feature-extraction", "sentence-similarity", "transformers", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
sentence-similarity
2022-07-07T14:25:03Z
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # gemasphi/laprador_trained This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('gemasphi/laprador_trained') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('gemasphi/laprador_trained') model = AutoModel.from_pretrained('gemasphi/laprador_trained') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=gemasphi/laprador_trained) ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 350, 'do_lower_case': False}) with Transformer model: DistilBertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
hsohn3/mayo-bert-visit-uncased-wordlevel-block512-batch4-ep50
hsohn3
2022-07-07T13:26:07Z
3
0
transformers
[ "transformers", "tf", "bert", "fill-mask", "generated_from_keras_callback", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
2022-07-07T02:00:16Z
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: hsohn3/mayo-bert-visit-uncased-wordlevel-block512-batch4-ep50 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # hsohn3/mayo-bert-visit-uncased-wordlevel-block512-batch4-ep50 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 1.0832 - Epoch: 49 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': 2e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: float32 ### Training results | Train Loss | Epoch | |:----------:|:-----:| | 4.1266 | 0 | | 3.5212 | 1 | | 3.4780 | 2 | | 3.4533 | 3 | | 3.4376 | 4 | | 3.4325 | 5 | | 3.4276 | 6 | | 3.4119 | 7 | | 3.3654 | 8 | | 3.2948 | 9 | | 3.2422 | 10 | | 3.2069 | 11 | | 3.1736 | 12 | | 3.1292 | 13 | | 3.0781 | 14 | | 3.0138 | 15 | | 2.9582 | 16 | | 2.8954 | 17 | | 2.7166 | 18 | | 2.4073 | 19 | | 2.1573 | 20 | | 1.9469 | 21 | | 1.7981 | 22 | | 1.7012 | 23 | | 1.6145 | 24 | | 1.5482 | 25 | | 1.4866 | 26 | | 1.4336 | 27 | | 1.3815 | 28 | | 1.3483 | 29 | | 1.3087 | 30 | | 1.2814 | 31 | | 1.2548 | 32 | | 1.2373 | 33 | | 1.2216 | 34 | | 1.2080 | 35 | | 1.1907 | 36 | | 1.1820 | 37 | | 1.1650 | 38 | | 1.1622 | 39 | | 1.1498 | 40 | | 1.1347 | 41 | | 1.1299 | 42 | | 1.1197 | 43 | | 1.1100 | 44 | | 1.1094 | 45 | | 1.1033 | 46 | | 1.0943 | 47 | | 1.0918 | 48 | | 1.0832 | 49 | ### Framework versions - Transformers 4.20.1 - TensorFlow 2.8.2 - Datasets 2.3.2 - Tokenizers 0.12.1
dminiotas05/distilbert-base-uncased-finetuned-ft500_6class600
dminiotas05
2022-07-07T13:23:59Z
3
0
transformers
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2022-07-07T12:40:35Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-ft500_6class600 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-ft500_6class600 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.6317 - Accuracy: 0.35 - F1: 0.3327 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 1.5717 | 1.0 | 188 | 1.5375 | 0.3067 | 0.2820 | | 1.4338 | 2.0 | 376 | 1.5354 | 0.3207 | 0.2824 | | 1.3516 | 3.0 | 564 | 1.4852 | 0.3573 | 0.3287 | | 1.2722 | 4.0 | 752 | 1.4997 | 0.366 | 0.3534 | | 1.1923 | 5.0 | 940 | 1.5474 | 0.362 | 0.3454 | | 1.1156 | 6.0 | 1128 | 1.5998 | 0.3547 | 0.3387 | | 1.0522 | 7.0 | 1316 | 1.6154 | 0.3473 | 0.3316 | | 1.0148 | 8.0 | 1504 | 1.6317 | 0.35 | 0.3327 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
Zengwei/icefall-asr-librispeech-pruned-transducer-stateless5-2022-07-07
Zengwei
2022-07-07T13:03:44Z
0
0
null
[ "tensorboard", "region:us" ]
null
2022-07-07T07:51:32Z
Introduction: see https://github.com/k2-fsa/icefall/pull/330 and https://github.com/k2-fsa/icefall/pull/452. It has a random combiner inside. Note: there was an issue in the log file, which has been fixed in https://github.com/k2-fsa/icefall/pull/468.
Zengwei/icefall-asr-librispeech-pruned-transducer-stateless5-M-2022-07-07
Zengwei
2022-07-07T12:30:37Z
0
0
null
[ "tensorboard", "region:us" ]
null
2022-07-07T10:17:44Z
Introduction: see https://github.com/k2-fsa/icefall/pull/330 and https://github.com/k2-fsa/icefall/pull/452. It has a random combiner inside.
Vikasbhandari/TRY
Vikasbhandari
2022-07-07T12:17:31Z
3
0
transformers
[ "transformers", "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "generated_from_trainer", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-07T11:42:30Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: TRY results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # TRY This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset. It achieves the following results on the evaluation set: - eval_loss: 0.4234 - eval_wer: 0.3884 - eval_runtime: 51.9275 - eval_samples_per_second: 32.353 - eval_steps_per_second: 4.044 - epoch: 7.03 - step: 3500 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 30 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0+cu113 - Datasets 1.18.3 - Tokenizers 0.12.1
TestZee/t5-small-finetuned-custom-wion-test-BIG
TestZee
2022-07-07T10:31:54Z
5
0
transformers
[ "transformers", "tf", "t5", "text2text-generation", "generated_from_keras_callback", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text2text-generation
2022-07-07T10:30:30Z
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: TestZee/t5-small-finetuned-custom-wion-test-BIG results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # TestZee/t5-small-finetuned-custom-wion-test-BIG This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 1.1165 - Validation Loss: 0.4609 - Epoch: 29 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': 2e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 1.9622 | 0.8875 | 0 | | 1.9276 | 0.8601 | 1 | | 1.8301 | 0.8342 | 2 | | 1.7776 | 0.8104 | 3 | | 1.7345 | 0.7878 | 4 | | 1.7733 | 0.7660 | 5 | | 1.5626 | 0.7448 | 6 | | 1.6111 | 0.7245 | 7 | | 1.6754 | 0.7050 | 8 | | 1.5030 | 0.6867 | 9 | | 1.5101 | 0.6696 | 10 | | 1.4328 | 0.6536 | 11 | | 1.4311 | 0.6383 | 12 | | 1.3917 | 0.6232 | 13 | | 1.4102 | 0.6071 | 14 | | 1.3732 | 0.5948 | 15 | | 1.3468 | 0.5828 | 16 | | 1.2817 | 0.5712 | 17 | | 1.2920 | 0.5600 | 18 | | 1.2696 | 0.5491 | 19 | | 1.2552 | 0.5385 | 20 | | 1.1859 | 0.5285 | 21 | | 1.1995 | 0.5188 | 22 | | 1.1690 | 0.5094 | 23 | | 1.1678 | 0.5003 | 24 | | 1.1420 | 0.4916 | 25 | | 1.0959 | 0.4830 | 26 | | 1.0848 | 0.4750 | 27 | | 1.1248 | 0.4677 | 28 | | 1.1165 | 0.4609 | 29 | ### Framework versions - Transformers 4.20.1 - TensorFlow 2.8.2 - Datasets 2.3.2 - Tokenizers 0.12.1
zhifei/autotrain-chinese-title-summarization-8-1101140174
zhifei
2022-07-07T10:21:29Z
4
0
transformers
[ "transformers", "pytorch", "mt5", "text2text-generation", "autotrain", "unk", "dataset:zhifei/autotrain-data-chinese-title-summarization-8", "co2_eq_emissions", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text2text-generation
2022-07-07T10:19:46Z
--- tags: autotrain language: unk widget: - text: "I love AutoTrain 🤗" datasets: - zhifei/autotrain-data-chinese-title-summarization-8 co2_eq_emissions: 1.4118255120710663 --- # Model Trained Using AutoTrain - Problem type: Summarization - Model ID: 1101140174 - CO2 Emissions (in grams): 1.4118255120710663 ## Validation Metrics - Loss: 0.0049639358185231686 - Rouge1: 49.3333 - Rouge2: 26.6667 - RougeL: 49.3333 - RougeLsum: 49.3333 - Gen Len: 15.12 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/zhifei/autotrain-chinese-title-summarization-8-1101140174 ```
osanseviero/ppo-LunarLander-v11
osanseviero
2022-07-07T09:43:04Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-07-07T09:42:42Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: -115.46 +/- 0.00 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
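The PPO cards in this series leave the usage section as a TODO; a hedged way to fill it in with huggingface_sb3 follows (the checkpoint filename is not stated in the card and is assumed here):

```python
# Hedged sketch only: the card does not give the checkpoint filename,
# so "ppo-LunarLander-v2.zip" below is an assumption.
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

checkpoint = load_from_hub(
    repo_id="osanseviero/ppo-LunarLander-v11",
    filename="ppo-LunarLander-v2.zip",  # assumed filename
)
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
obs = env.reset()
action, _ = model.predict(obs, deterministic=True)
print(action)
```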
osanseviero/ppo-LunarLander-v10
osanseviero
2022-07-07T09:42:36Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-07-07T09:38:00Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: -574.85 +/- 0.00 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
huggingtweets/marsajal
huggingtweets
2022-07-07T09:42:16Z
4
0
transformers
[ "transformers", "pytorch", "gpt2", "text-generation", "huggingtweets", "en", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2022-03-02T23:29:05Z
--- language: en thumbnail: http://www.huggingtweets.com/marsajal/1657186931820/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1463196823728771079/wZc0m7cd_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">ajeng🦦</div> <div style="text-align: center; font-size: 14px;">@marsajal</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from ajeng🦦. | Data | ajeng🦦 | | --- | --- | | Tweets downloaded | 214 | | Retweets | 37 | | Short tweets | 41 | | Tweets kept | 136 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/3kdiymty/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @marsajal's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/lfk0v9ey) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/lfk0v9ey/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/marsajal') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
osanseviero/ppo-LunarLander-v9
osanseviero
2022-07-07T09:37:00Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-07-07T09:36:34Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: -30.40 +/- 0.00 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
osanseviero/ppo-LunarLander-v6
osanseviero
2022-07-07T09:29:20Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-07-07T09:07:08Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: -443.18 +/- 0.00 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
gary109/ai-light-dance_singing3_ft_wav2vec2-large-xlsr-53
gary109
2022-07-07T09:10:42Z
3
0
transformers
[ "transformers", "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "gary109/AI_Light_Dance", "generated_from_trainer", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-01T03:42:00Z
--- license: apache-2.0 tags: - automatic-speech-recognition - gary109/AI_Light_Dance - generated_from_trainer model-index: - name: ai-light-dance_singing3_ft_wav2vec2-large-xlsr-53 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ai-light-dance_singing3_ft_wav2vec2-large-xlsr-53 This model is a fine-tuned version of [gary109/ai-light-dance_singing3_ft_wav2vec2-large-xlsr-53](https://huggingface.co/gary109/ai-light-dance_singing3_ft_wav2vec2-large-xlsr-53) on the GARY109/AI_LIGHT_DANCE - ONSET-SINGING3 dataset. It achieves the following results on the evaluation set: - Loss: 0.8797 - Wer: 0.5513 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-06 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 100 - num_epochs: 30.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:-----:|:---------------:|:------:| | 0.9613 | 1.0 | 2309 | 1.0171 | 0.7271 | | 0.8254 | 2.0 | 4618 | 0.9771 | 0.6650 | | 0.7406 | 3.0 | 6927 | 0.9174 | 0.6420 | | 0.74 | 4.0 | 9236 | 0.9551 | 0.6371 | | 0.5855 | 5.0 | 11545 | 0.9262 | 0.6453 | | 0.5536 | 6.0 | 13854 | 0.9056 | 0.5894 | | 0.505 | 7.0 | 16163 | 0.9166 | 0.6029 | | 0.449 | 8.0 | 18472 | 0.8816 | 0.5873 | | 0.4219 | 9.0 | 20781 | 0.8970 | 0.5589 | | 0.5764 | 10.0 | 23090 | 0.9189 | 0.5649 | | 0.5075 | 11.0 | 25399 | 0.8797 | 0.5513 | | 0.4366 | 12.0 | 27708 | 0.9011 | 0.5567 | | 0.4915 | 13.0 | 30017 | 0.9248 | 0.5455 | | 0.3554 | 14.0 | 32326 | 0.9309 | 0.5374 | | 0.3975 | 15.0 | 34635 | 0.9103 | 0.5259 | | 0.4119 | 16.0 | 36944 | 0.9402 | 0.5290 | | 0.267 | 17.0 | 39253 | 0.9479 | 0.5115 | | 0.3107 | 18.0 | 41562 | 0.9428 | 0.5099 | | 0.2684 | 19.0 | 43871 | 0.9508 | 0.5133 | | 0.2125 | 20.0 | 46180 | 0.9737 | 0.5097 | | 0.3149 | 21.0 | 48489 | 0.9992 | 0.5095 | | 0.2313 | 22.0 | 50798 | 1.0037 | 0.5059 | | 0.2674 | 23.0 | 53107 | 1.0091 | 0.5040 | | 0.2056 | 24.0 | 55416 | 1.0082 | 0.5076 | | 0.2781 | 25.0 | 57725 | 1.0160 | 0.5015 | | 0.2005 | 26.0 | 60034 | 1.0390 | 0.5131 | | 0.2221 | 27.0 | 62343 | 1.0401 | 0.5074 | | 0.1857 | 28.0 | 64652 | 1.0484 | 0.5096 | | 0.1562 | 29.0 | 66961 | 1.0516 | 0.5064 | | 0.3027 | 30.0 | 69270 | 1.0543 | 0.5049 | ### Framework versions - Transformers 4.21.0.dev0 - Pytorch 1.9.1+cu102 - Datasets 2.3.3.dev0 - Tokenizers 0.12.1
kalyanavirundhubiryani/Best-Biryani-in-Chennai-Kalyana-virundhu-Biryani
kalyanavirundhubiryani
2022-07-07T09:07:31Z
0
0
null
[ "region:us" ]
null
2022-07-07T08:48:29Z
Kalyana Virundhu Biryani serves one of the best biryanis in Chennai. We serve various types of biryani along with our special side dishes. Order by phone: +91 8939234566 or visit our website https://www.kalyanavirundhubiryani.com/ #biryanifamousinchennai #biryanibestinchennai #chennaibestbiryanihotel #specialbiryaniinchennai #KalyanaVirundhuBiryani
osanseviero/ppo-LunarLander-v5
osanseviero
2022-07-07T08:59:26Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-07-07T08:47:49Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: -479.21 +/- 0.00 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
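A minimal sketch of what the missing usage snippet could look like; the checkpoint filename inside the repo is an assumption, so check the repository's file listing before running:

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Filename assumed to follow the usual "<algo>-<env>.zip" convention; adjust if the repo differs.
checkpoint = load_from_hub(repo_id="osanseviero/ppo-LunarLander-v5", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```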
Sebabrata/lmv2-g-w9-2018-148-doc-07-07_1
Sebabrata
2022-07-07T08:52:38Z
5
0
transformers
[ "transformers", "pytorch", "tensorboard", "layoutlmv2", "token-classification", "generated_from_trainer", "license:cc-by-nc-sa-4.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2022-07-07T08:17:04Z
--- license: cc-by-nc-sa-4.0 tags: - generated_from_trainer model-index: - name: lmv2-g-w9-2018-148-doc-07-07_1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # lmv2-g-w9-2018-148-doc-07-07_1 This model is a fine-tuned version of [microsoft/layoutlmv2-base-uncased](https://huggingface.co/microsoft/layoutlmv2-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0160 - Address Precision: 0.9667 - Address Recall: 0.9667 - Address F1: 0.9667 - Address Number: 30 - Business Name Precision: 1.0 - Business Name Recall: 1.0 - Business Name F1: 1.0 - Business Name Number: 29 - City State Zip Code Precision: 1.0 - City State Zip Code Recall: 1.0 - City State Zip Code F1: 1.0 - City State Zip Code Number: 30 - Ein Precision: 0.0 - Ein Recall: 0.0 - Ein F1: 0.0 - Ein Number: 1 - List Account Number Precision: 1.0 - List Account Number Recall: 1.0 - List Account Number F1: 1.0 - List Account Number Number: 11 - Name Precision: 1.0 - Name Recall: 1.0 - Name F1: 1.0 - Name Number: 30 - Ssn Precision: 0.8333 - Ssn Recall: 1.0 - Ssn F1: 0.9091 - Ssn Number: 10 - Overall Precision: 0.9789 - Overall Recall: 0.9858 - Overall F1: 0.9823 - Overall Accuracy: 0.9995 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant - num_epochs: 30 ### Training results | Training Loss | Epoch | Step | Validation Loss | Address Precision | Address Recall | Address F1 | Address Number | Business Name Precision | Business Name Recall | Business Name F1 | Business Name Number | City State Zip Code Precision | City State Zip Code Recall | City State Zip Code F1 | City State Zip Code Number | Ein Precision | Ein Recall | Ein F1 | Ein Number | List Account Number Precision | List Account Number Recall | List Account Number F1 | List Account Number Number | Name Precision | Name Recall | Name F1 | Name Number | Ssn Precision | Ssn Recall | Ssn F1 | Ssn Number | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy | |:-------------:|:-----:|:----:|:---------------:|:-----------------:|:--------------:|:----------:|:--------------:|:-----------------------:|:--------------------:|:----------------:|:--------------------:|:-----------------------------:|:--------------------------:|:----------------------:|:--------------------------:|:-------------:|:----------:|:------:|:----------:|:-----------------------------:|:--------------------------:|:----------------------:|:--------------------------:|:--------------:|:-----------:|:-------:|:-----------:|:-------------:|:----------:|:------:|:----------:|:-----------------:|:--------------:|:----------:|:----------------:| | 1.5672 | 1.0 | 118 | 1.1527 | 0.0 | 0.0 | 0.0 | 30 | 0.0 | 0.0 | 0.0 | 29 | 0.0 | 0.0 | 0.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 0.0 | 0.0 | 0.0 | 11 | 0.0 | 0.0 | 0.0 | 30 | 0.0 | 0.0 | 0.0 | 10 | 0.0 | 0.0 | 0.0 | 0.9642 | | 0.8804 | 2.0 | 236 | 0.5661 | 0.2095 | 0.7333 | 0.3259 | 30 | 0.0 | 0.0 | 0.0 | 29 | 0.0 | 0.0 | 0.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 0.0 | 0.0 | 0.0 | 11 | 0.0 | 0.0 | 0.0 
| 30 | 0.0 | 0.0 | 0.0 | 10 | 0.2095 | 0.1560 | 0.1789 | 0.9704 | | 0.3739 | 3.0 | 354 | 0.2118 | 0.9375 | 1.0 | 0.9677 | 30 | 0.7143 | 0.1724 | 0.2778 | 29 | 0.9375 | 1.0 | 0.9677 | 30 | 0.0 | 0.0 | 0.0 | 1 | 0.8182 | 0.8182 | 0.8182 | 11 | 0.5 | 1.0 | 0.6667 | 30 | 0.75 | 0.9 | 0.8182 | 10 | 0.7338 | 0.8014 | 0.7661 | 0.9932 | | 0.1626 | 4.0 | 472 | 0.1155 | 0.9375 | 1.0 | 0.9677 | 30 | 0.8710 | 0.9310 | 0.9 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 0.6923 | 0.8182 | 0.7500 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.7 | 0.7 | 0.7 | 10 | 0.9110 | 0.9433 | 0.9268 | 0.9976 | | 0.1031 | 5.0 | 590 | 0.0817 | 0.9355 | 0.9667 | 0.9508 | 30 | 0.8125 | 0.8966 | 0.8525 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 0.6923 | 0.8182 | 0.7500 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.8182 | 0.9 | 0.8571 | 10 | 0.9048 | 0.9433 | 0.9236 | 0.9981 | | 0.0769 | 6.0 | 708 | 0.0634 | 0.9355 | 0.9667 | 0.9508 | 30 | 0.9333 | 0.9655 | 0.9492 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 0.6923 | 0.8182 | 0.7500 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.8182 | 0.9 | 0.8571 | 10 | 0.9310 | 0.9574 | 0.9441 | 0.9984 | | 0.0614 | 7.0 | 826 | 0.0518 | 0.9667 | 0.9667 | 0.9667 | 30 | 1.0 | 1.0 | 1.0 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 0.6923 | 0.8182 | 0.7500 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.8182 | 0.9 | 0.8571 | 10 | 0.9510 | 0.9645 | 0.9577 | 0.9991 | | 0.0509 | 8.0 | 944 | 0.0432 | 0.9667 | 0.9667 | 0.9667 | 30 | 1.0 | 1.0 | 1.0 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 0.8333 | 0.9091 | 0.8696 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.8182 | 0.9 | 0.8571 | 10 | 0.9648 | 0.9716 | 0.9682 | 0.9994 | | 0.0431 | 9.0 | 1062 | 0.0369 | 0.9667 | 0.9667 | 0.9667 | 30 | 1.0 | 1.0 | 1.0 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 1.0 | 1.0 | 1.0 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.8182 | 0.9 | 0.8571 | 10 | 0.9787 | 0.9787 | 0.9787 | 0.9994 | | 0.037 | 10.0 | 1180 | 0.0313 | 0.9667 | 0.9667 | 0.9667 | 30 | 1.0 | 1.0 | 1.0 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 1.0 | 1.0 | 1.0 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.8182 | 0.9 | 0.8571 | 10 | 0.9787 | 0.9787 | 0.9787 | 0.9994 | | 0.0328 | 11.0 | 1298 | 0.0281 | 0.9667 | 0.9667 | 0.9667 | 30 | 1.0 | 1.0 | 1.0 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 1.0 | 1.0 | 1.0 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.7143 | 1.0 | 0.8333 | 10 | 0.9653 | 0.9858 | 0.9754 | 0.9994 | | 0.0295 | 12.0 | 1416 | 0.0246 | 0.7429 | 0.8667 | 0.8 | 30 | 1.0 | 1.0 | 1.0 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 1.0 | 1.0 | 1.0 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.6667 | 0.8 | 0.7273 | 10 | 0.9116 | 0.9504 | 0.9306 | 0.9991 | | 0.0251 | 13.0 | 1534 | 0.0207 | 0.9677 | 1.0 | 0.9836 | 30 | 0.9333 | 0.9655 | 0.9492 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 1.0 | 1.0 | 1.0 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.8333 | 1.0 | 0.9091 | 10 | 0.9653 | 0.9858 | 0.9754 | 0.9994 | | 0.0231 | 14.0 | 1652 | 0.0210 | 0.9667 | 0.9667 | 0.9667 | 30 | 1.0 | 0.9655 | 0.9825 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 1.0 | 1.0 | 1.0 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.8333 | 1.0 | 0.9091 | 10 | 0.9787 | 0.9787 | 0.9787 | 0.9991 | | 0.0184 | 15.0 | 1770 | 0.0160 | 0.9667 | 0.9667 | 0.9667 | 30 | 1.0 | 1.0 | 1.0 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 1.0 | 1.0 | 1.0 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.8333 | 1.0 | 0.9091 | 10 | 0.9789 | 0.9858 | 0.9823 | 0.9995 | | 0.0162 | 16.0 | 1888 | 0.0142 | 0.9667 | 0.9667 | 0.9667 | 30 | 1.0 | 1.0 | 1.0 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 1.0 | 1.0 | 1.0 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.8333 | 1.0 | 0.9091 | 10 | 0.9789 | 0.9858 | 
0.9823 | 0.9995 | | 0.0142 | 17.0 | 2006 | 0.0127 | 0.9667 | 0.9667 | 0.9667 | 30 | 1.0 | 1.0 | 1.0 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 1.0 | 1.0 | 1.0 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.8333 | 1.0 | 0.9091 | 10 | 0.9789 | 0.9858 | 0.9823 | 0.9995 | | 0.0123 | 18.0 | 2124 | 0.0114 | 0.9667 | 0.9667 | 0.9667 | 30 | 1.0 | 1.0 | 1.0 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 1.0 | 1.0 | 1.0 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.8333 | 1.0 | 0.9091 | 10 | 0.9789 | 0.9858 | 0.9823 | 0.9995 | | 0.0118 | 19.0 | 2242 | 0.0152 | 0.9677 | 1.0 | 0.9836 | 30 | 0.6765 | 0.7931 | 0.7302 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 0.8333 | 0.9091 | 0.8696 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.8182 | 0.9 | 0.8571 | 10 | 0.8859 | 0.9362 | 0.9103 | 0.9986 | | 0.0104 | 20.0 | 2360 | 0.0125 | 0.9677 | 1.0 | 0.9836 | 30 | 1.0 | 0.9655 | 0.9825 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 1.0 | 1.0 | 1.0 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.9091 | 1.0 | 0.9524 | 10 | 0.9789 | 0.9858 | 0.9823 | 0.9992 | | 0.0092 | 21.0 | 2478 | 0.0113 | 0.9677 | 1.0 | 0.9836 | 30 | 1.0 | 0.9655 | 0.9825 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 1.0 | 1.0 | 1.0 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.8333 | 1.0 | 0.9091 | 10 | 0.9653 | 0.9858 | 0.9754 | 0.9993 | | 0.0089 | 22.0 | 2596 | 0.0111 | 0.9677 | 1.0 | 0.9836 | 30 | 1.0 | 0.9655 | 0.9825 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 1.0 | 1.0 | 1.0 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.8333 | 1.0 | 0.9091 | 10 | 0.9789 | 0.9858 | 0.9823 | 0.9992 | | 0.0076 | 23.0 | 2714 | 0.0107 | 0.9677 | 1.0 | 0.9836 | 30 | 0.9310 | 0.9310 | 0.9310 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 1.0 | 1.0 | 1.0 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.8333 | 1.0 | 0.9091 | 10 | 0.9650 | 0.9787 | 0.9718 | 0.9991 | | 0.0074 | 24.0 | 2832 | 0.0105 | 0.9677 | 1.0 | 0.9836 | 30 | 0.9310 | 0.9310 | 0.9310 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 1.0 | 1.0 | 1.0 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.8182 | 0.9 | 0.8571 | 10 | 0.9514 | 0.9716 | 0.9614 | 0.9990 | | 0.007 | 25.0 | 2950 | 0.0092 | 0.9677 | 1.0 | 0.9836 | 30 | 1.0 | 0.9655 | 0.9825 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 1.0 | 1.0 | 1.0 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.7692 | 1.0 | 0.8696 | 10 | 0.9720 | 0.9858 | 0.9789 | 0.9991 | | 0.0062 | 26.0 | 3068 | 0.0061 | 0.9677 | 1.0 | 0.9836 | 30 | 1.0 | 1.0 | 1.0 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 1.0 | 1.0 | 1.0 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.7143 | 1.0 | 0.8333 | 10 | 0.9655 | 0.9929 | 0.9790 | 0.9994 | | 0.0057 | 27.0 | 3186 | 0.0056 | 0.9677 | 1.0 | 0.9836 | 30 | 1.0 | 1.0 | 1.0 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 1.0 | 1.0 | 1.0 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.8182 | 0.9 | 0.8571 | 10 | 0.9720 | 0.9858 | 0.9789 | 0.9995 | | 0.0047 | 28.0 | 3304 | 0.0054 | 0.9677 | 1.0 | 0.9836 | 30 | 1.0 | 1.0 | 1.0 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 1.0 | 1.0 | 1.0 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.7143 | 1.0 | 0.8333 | 10 | 0.9655 | 0.9929 | 0.9790 | 0.9994 | | 0.0042 | 29.0 | 3422 | 0.0052 | 0.9677 | 1.0 | 0.9836 | 30 | 1.0 | 1.0 | 1.0 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 1.0 | 1.0 | 1.0 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.7143 | 1.0 | 0.8333 | 10 | 0.9655 | 0.9929 | 0.9790 | 0.9994 | | 0.0039 | 30.0 | 3540 | 0.0049 | 0.9677 | 1.0 | 0.9836 | 30 | 1.0 | 1.0 | 1.0 | 29 | 1.0 | 1.0 | 1.0 | 30 | 0.0 | 0.0 | 0.0 | 1 | 1.0 | 1.0 | 1.0 | 11 | 1.0 | 1.0 | 1.0 | 30 | 0.7143 | 1.0 | 0.8333 | 10 | 0.9655 | 0.9929 | 0.9790 | 0.9994 | ### Framework versions - Transformers 4.21.0.dev0 - Pytorch 1.11.0+cu113 - Datasets 
2.2.2 - Tokenizers 0.12.1
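A minimal inference sketch, assuming a scanned W-9 form image (`w9_form.png` is a placeholder) and the base LayoutLMv2 processor; the processor's built-in OCR needs `pytesseract`, and LayoutLMv2 itself needs `detectron2`:

```python
from PIL import Image
from transformers import LayoutLMv2Processor, LayoutLMv2ForTokenClassification

processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased")
model = LayoutLMv2ForTokenClassification.from_pretrained("Sebabrata/lmv2-g-w9-2018-148-doc-07-07_1")

# Run OCR + tokenization on the document image, then tag each token.
image = Image.open("w9_form.png").convert("RGB")
encoding = processor(image, return_tensors="pt")
outputs = model(**encoding)

predicted_ids = outputs.logits.argmax(-1)[0]
labels = [model.config.id2label[i.item()] for i in predicted_ids]
print(labels)
```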
paola-md/recipe-distilbert-is
paola-md
2022-07-07T08:34:16Z
6
0
transformers
[ "transformers", "pytorch", "distilbert", "fill-mask", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
2022-07-06T17:09:58Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: recipe-distilbert-is results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # recipe-distilbert-is This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 4.0558 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 3.9409 | 1.0 | 1 | 4.0558 | ### Framework versions - Transformers 4.19.0.dev0 - Pytorch 1.11.0+cu102 - Datasets 2.3.2 - Tokenizers 0.12.1
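A minimal usage sketch with the standard fill-mask pipeline; the recipe-style prompt is an invented example, not taken from the training data:

```python
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="paola-md/recipe-distilbert-is")
# DistilBERT uses [MASK] as its mask token.
print(fill_mask("Preheat the [MASK] to 350 degrees."))
```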
hsohn3/mayo-bert-visit-uncased-wordlevel-block512-batch4-ep100
hsohn3
2022-07-07T08:33:59Z
3
0
transformers
[ "transformers", "tf", "bert", "fill-mask", "generated_from_keras_callback", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
2022-07-06T16:29:49Z
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: hsohn3/mayo-bert-visit-uncased-wordlevel-block512-batch4-ep100 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # hsohn3/mayo-bert-visit-uncased-wordlevel-block512-batch4-ep100 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.9559 - Epoch: 99 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': 2e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: float32 ### Training results | Train Loss | Epoch | |:----------:|:-----:| | 4.1247 | 0 | | 3.5129 | 1 | | 3.4726 | 2 | | 3.4483 | 3 | | 3.4395 | 4 | | 3.4301 | 5 | | 3.4260 | 6 | | 3.4131 | 7 | | 3.3831 | 8 | | 3.2925 | 9 | | 3.2454 | 10 | | 3.2092 | 11 | | 3.1695 | 12 | | 3.1346 | 13 | | 3.0797 | 14 | | 3.0154 | 15 | | 2.9557 | 16 | | 2.8814 | 17 | | 2.7720 | 18 | | 2.5472 | 19 | | 2.3193 | 20 | | 2.1005 | 21 | | 1.9331 | 22 | | 1.7971 | 23 | | 1.6859 | 24 | | 1.6062 | 25 | | 1.5310 | 26 | | 1.4706 | 27 | | 1.4203 | 28 | | 1.3681 | 29 | | 1.3222 | 30 | | 1.2939 | 31 | | 1.2726 | 32 | | 1.2494 | 33 | | 1.2330 | 34 | | 1.2161 | 35 | | 1.1998 | 36 | | 1.1874 | 37 | | 1.1767 | 38 | | 1.1641 | 39 | | 1.1550 | 40 | | 1.1407 | 41 | | 1.1363 | 42 | | 1.1272 | 43 | | 1.1227 | 44 | | 1.1163 | 45 | | 1.1065 | 46 | | 1.1008 | 47 | | 1.0957 | 48 | | 1.0837 | 49 | | 1.0844 | 50 | | 1.0778 | 51 | | 1.0741 | 52 | | 1.0693 | 53 | | 1.0662 | 54 | | 1.0608 | 55 | | 1.0521 | 56 | | 1.0526 | 57 | | 1.0476 | 58 | | 1.0454 | 59 | | 1.0452 | 60 | | 1.0348 | 61 | | 1.0333 | 62 | | 1.0342 | 63 | | 1.0293 | 64 | | 1.0249 | 65 | | 1.0241 | 66 | | 1.0194 | 67 | | 1.0177 | 68 | | 1.0102 | 69 | | 1.0055 | 70 | | 1.0052 | 71 | | 1.0038 | 72 | | 1.0005 | 73 | | 0.9981 | 74 | | 0.9991 | 75 | | 0.9950 | 76 | | 0.9928 | 77 | | 0.9898 | 78 | | 0.9906 | 79 | | 0.9873 | 80 | | 0.9849 | 81 | | 0.9808 | 82 | | 0.9804 | 83 | | 0.9792 | 84 | | 0.9789 | 85 | | 0.9797 | 86 | | 0.9741 | 87 | | 0.9781 | 88 | | 0.9678 | 89 | | 0.9686 | 90 | | 0.9651 | 91 | | 0.9652 | 92 | | 0.9613 | 93 | | 0.9599 | 94 | | 0.9566 | 95 | | 0.9571 | 96 | | 0.9577 | 97 | | 0.9536 | 98 | | 0.9559 | 99 | ### Framework versions - Transformers 4.20.1 - TensorFlow 2.8.2 - Datasets 2.3.2 - Tokenizers 0.12.1
avichr/Legal-heBERT_ft
avichr
2022-07-07T07:31:58Z
28
3
transformers
[ "transformers", "pytorch", "bert", "fill-mask", "arxiv:1911.03090", "arxiv:2010.02559", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
2022-05-05T06:49:36Z
# Legal-HeBERT Legal-HeBERT is a BERT model for Hebrew legal and legislative domains. It is intended to improve the legal NLP research and tools development in Hebrew. We release two versions of Legal-HeBERT. The first version is a fine-tuned model of [HeBERT](https://github.com/avichaychriqui/HeBERT) applied on legal and legislative documents. The second version uses [HeBERT](https://github.com/avichaychriqui/HeBERT)'s architecture guidlines to train a BERT model from scratch. <br> We continue collecting legal data, examining different architectural designs, and performing tagged datasets and legal tasks for evaluating and to development of a Hebrew legal tools. ## Training Data Our training datasets are: | Name | Hebrew Description | Size (GB) | Documents | Sentences | Words | Notes | |----------------------------------------------------------------------------------------------------------------------------------- |-------------------------------------------------------------------------- |----------- |----------- |------------ |------------- |----------------------------------------- | | The Israeli Law Book | ספר החוקים הישראלי | 0.05 | 2338 | 293352 | 4851063 | | | Judgments of the Supreme Court | מאגר פסקי הדין של בית המשפט העליון | 0.7 | 212348 | 5790138 | 79672415 | | | custody courts | החלטות בתי הדין למשמורת | 2.46 | 169,708 | 8,555,893 | 213,050,492 | | | Law memoranda, drafts of secondary legislation and drafts of support tests that have been distributed to the public for comment | תזכירי חוק, טיוטות חקיקת משנה וטיוטות מבחני תמיכה שהופצו להערות הציבור | 0.4 | 3,291 | 294,752 | 7,218,960 | | | Supervisors of Land Registration judgments | מאגר פסקי דין של המפקחים על רישום המקרקעין | 0.02 | 559 | 67,639 | 1,785,446 | | | Decisions of the Labor Court - Corona | מאגר החלטות בית הדין לעניין שירות התעסוקה – קורונה | 0.001 | 146 | 3505 | 60195 | | | Decisions of the Israel Lands Council | החלטות מועצת מקרקעי ישראל | | 118 | 11283 | 162692 | aggregate file | | Judgments of the Disciplinary Tribunal and the Israel Police Appeals Tribunal | פסקי דין של בית הדין למשמעת ובית הדין לערעורים של משטרת ישראל | 0.02 | 54 | 83724 | 1743419 | aggregate files | | Disciplinary Appeals Committee in the Ministry of Health | ועדת ערר לדין משמעתי במשרד הבריאות | 0.004 | 252 | 21010 | 429807 | 465 files are scanned and didn't parser | | Attorney General's Positions | מאגר התייצבויות היועץ המשפטי לממשלה | 0.008 | 281 | 32724 | 813877 | | | Legal-Opinion of the Attorney General | מאגר חוות דעת היועץ המשפטי לממשלה | 0.002 | 44 | 7132 | 188053 | | | | | | | | | | | total | | 3.665 | 389,139 | 15,161,152 | 309,976,419 | | We thank <b>Yair Gardin</b> for the referring to the governance data, <b>Elhanan Schwarts</b> for collecting and parsing The Israeli law book, and <b>Jonathan Schler</b> for collecting the judgments of the supreme court. ## Training process * Vocabulary size: 50,000 tokens * 4 epochs (1M steps±) * lr=5e-5 * mlm_probability=0.15 * batch size = 32 (for each gpu) * NVIDIA GeForce RTX 2080 TI + NVIDIA GeForce RTX 3090 (1 week training) ### Additional training settings: <b>Fine-tuned [HeBERT](https://github.com/avichaychriqui/HeBERT) model:</b> The first eight layers were freezed (like [Lee et al. (2019)](https://arxiv.org/abs/1911.03090) suggest)<br> <b>Legal-HeBERT trained from scratch:</b> The training process is similar to [HeBERT](https://github.com/avichaychriqui/HeBERT) and inspired by [Chalkidis et al. 
(2020)](https://arxiv.org/abs/2010.02559) <br> ## How to use The models can be found in huggingface hub and can be fine-tunned to any down-stream task: ``` # !pip install transformers==4.14.1 from transformers import AutoTokenizer, AutoModel model_name = 'avichr/Legal-heBERT_ft' # for the fine-tuned HeBERT model model_name = 'avichr/Legal-heBERT' # for legal HeBERT model trained from scratch tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModel.from_pretrained(model_name) from transformers import pipeline fill_mask = pipeline( "fill-mask", model=model_name, ) fill_mask("הקורונה לקחה את [MASK] ולנו לא נשאר דבר.") ``` ## Stay tuned! We are still working on our models and the datasets. We will edit this page as we progress. We are open for collaborations. ## If you used this model please cite us as : Chriqui, Avihay, Yahav, Inbal and Bar-Siman-Tov, Ittai, Legal HeBERT: A BERT-based NLP Model for Hebrew Legal, Judicial and Legislative Texts (June 27, 2022). Available at: https://papers.ssrn.com/sol3/papers.cfm?abstract_id=4147127 ``` @article{chriqui2021hebert, title={Legal HeBERT: A BERT-based NLP Model for Hebrew Legal, Judicial and Legislative Texts}, author={Chriqui, Avihay, Yahav, Inbal and Bar-Siman-Tov, Ittai}, journal={SSRN preprint:4147127}, year={2022} } ``` ## Contact us [Avichay Chriqui](mailto:[email protected]), The Coller AI Lab <br> [Inbal yahav](mailto:[email protected]), The Coller AI Lab <br> [Ittai Bar-Siman-Tov](mailto:[email protected]), the BIU Innovation Lab for Law, Data-Science and Digital Ethics <br> Thank you, תודה, شكرا <br>
csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless5-L-2022-05-23
csukuangfj
2022-07-07T07:16:45Z
0
0
null
[ "tensorboard", "region:us" ]
null
2022-05-23T03:36:04Z
# Introduction See https://github.com/k2-fsa/icefall/pull/330 for details. This model does not use a random combiner. TensorBoard log: https://tensorboard.dev/experiment/VKoVx6IZTBuGCJN9kt72BQ/
ScarlettSun9/autotrain-ZuoZhuan-1100540141
ScarlettSun9
2022-07-07T07:08:04Z
4
0
transformers
[ "transformers", "pytorch", "roberta", "token-classification", "autotrain", "unk", "dataset:ScarlettSun9/autotrain-data-ZuoZhuan", "co2_eq_emissions", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2022-07-07T07:02:53Z
--- tags: autotrain language: unk widget: - text: "I love AutoTrain 🤗" datasets: - ScarlettSun9/autotrain-data-ZuoZhuan co2_eq_emissions: 8.343592303925112 --- # Model Trained Using AutoTrain - Problem type: Entity Extraction - Model ID: 1100540141 - CO2 Emissions (in grams): 8.343592303925112 ## Validation Metrics - Loss: 0.38094884157180786 - Accuracy: 0.8795777325860159 - Precision: 0.8171375141922127 - Recall: 0.8417033571821684 - F1: 0.8292385373953709 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/ScarlettSun9/autotrain-ZuoZhuan-1100540141 ``` Or Python API: ``` from transformers import AutoModelForTokenClassification, AutoTokenizer model = AutoModelForTokenClassification.from_pretrained("ScarlettSun9/autotrain-ZuoZhuan-1100540141", use_auth_token=True) tokenizer = AutoTokenizer.from_pretrained("ScarlettSun9/autotrain-ZuoZhuan-1100540141", use_auth_token=True) inputs = tokenizer("I love AutoTrain", return_tensors="pt") outputs = model(**inputs) ```
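A short continuation of the Python snippet above (it assumes `model`, `tokenizer`, `inputs` and `outputs` from that snippet are still in scope), showing one way to turn the raw logits into per-token labels:

```python
# Pick the highest-scoring label for each token and map ids to label names.
predicted_ids = outputs.logits.argmax(dim=-1)[0]
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
labels = [model.config.id2label[i.item()] for i in predicted_ids]
print(list(zip(tokens, labels)))
```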
go2k/q-Taxi-v3
go2k
2022-07-07T05:45:11Z
0
0
null
[ "Taxi-v3", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-07-07T05:39:36Z
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3** . ## Usage ```python model = load_from_hub(repo_id="go2k/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
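The snippet above relies on `load_from_hub` and `evaluate_agent` helpers from the Deep RL course notebook. A minimal stand-in for the loading part, assuming the repo stores a pickled dictionary as `q-learning.pkl` (as the snippet suggests):

```python
import pickle
import gym
from huggingface_hub import hf_hub_download


def load_from_hub(repo_id: str, filename: str) -> dict:
    """Download the pickled Q-table bundle from the Hub and unpickle it."""
    path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(path, "rb") as f:
        return pickle.load(f)


model = load_from_hub(repo_id="go2k/q-Taxi-v3", filename="q-learning.pkl")
env = gym.make(model["env_id"])
# model["qtable"], model["max_steps"], model["n_eval_episodes"] and model["eval_seed"]
# can then be passed to the course's evaluate_agent helper, as in the snippet above.
```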
go2k/q-FrozenLake-v1-4x4-noSlippery
go2k
2022-07-07T05:26:00Z
0
0
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-07-07T05:25:54Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1** . ## Usage ```python model = load_from_hub(repo_id="go2k/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
domenicrosati/deberta-v3-xsmall-with-biblio-context-finetuned-review_classifier
domenicrosati
2022-07-07T05:12:58Z
16
0
transformers
[ "transformers", "pytorch", "tensorboard", "deberta-v2", "text-classification", "generated_from_trainer", "license:mit", "endpoints_compatible", "region:us" ]
text-classification
2022-07-06T01:12:52Z
--- license: mit tags: - text-classification - generated_from_trainer metrics: - accuracy - f1 - recall - precision model-index: - name: deberta-v3-xsmall-with-biblio-context-finetuned-review_classifier results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # deberta-v3-xsmall-with-biblio-context-finetuned-review_classifier This model is a fine-tuned version of [microsoft/deberta-v3-xsmall](https://huggingface.co/microsoft/deberta-v3-xsmall) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0979 - Accuracy: 0.9682 - F1: 0.8332 - Recall: 0.8466 - Precision: 0.8202 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4.5e-05 - train_batch_size: 12 - eval_batch_size: 12 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 2 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Recall | Precision | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|:------:|:---------:| | 0.1539 | 1.0 | 6667 | 0.1237 | 0.9584 | 0.7668 | 0.7307 | 0.8067 | | 0.1271 | 2.0 | 13334 | 0.0979 | 0.9682 | 0.8332 | 0.8466 | 0.8202 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.0+cu102 - Datasets 2.3.2 - Tokenizers 0.12.1
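A minimal usage sketch with the text-classification pipeline; the example sentence is invented, and the meaning of the output labels should be checked against `model.config.id2label`:

```python
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="domenicrosati/deberta-v3-xsmall-with-biblio-context-finetuned-review_classifier",
)
print(classifier("This paper presents a systematic review of the literature on machine translation."))
```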
Evelyn18/distilbert-base-uncased-becasv2-4
Evelyn18
2022-07-07T04:16:06Z
5
0
transformers
[ "transformers", "pytorch", "tensorboard", "distilbert", "question-answering", "generated_from_trainer", "dataset:becasv2", "license:apache-2.0", "endpoints_compatible", "region:us" ]
question-answering
2022-07-07T04:11:32Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - becasv2 model-index: - name: distilbert-base-uncased-becasv2-4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-becasv2-4 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the becasv2 dataset. It achieves the following results on the evaluation set: - Loss: 3.4637 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 6 | 5.3677 | | No log | 2.0 | 12 | 4.6741 | | No log | 3.0 | 18 | 4.2978 | | No log | 4.0 | 24 | 3.9963 | | No log | 5.0 | 30 | 3.7544 | | No log | 6.0 | 36 | 3.5810 | | No log | 7.0 | 42 | 3.4932 | | No log | 8.0 | 48 | 3.4637 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
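A minimal usage sketch with the question-answering pipeline; the question/context pair is an invented example (becasv2 appears to be a Spanish scholarship QA dataset, so a Spanish prompt is used):

```python
from transformers import pipeline

qa = pipeline("question-answering", model="Evelyn18/distilbert-base-uncased-becasv2-4")
result = qa(
    question="¿Qué becas ofrece la universidad?",
    context="La universidad ofrece becas de matrícula y becas de manutención para estudiantes de grado.",
)
print(result)
```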
Evelyn18/distilbert-base-uncased-becasv2-3
Evelyn18
2022-07-07T04:00:45Z
3
0
transformers
[ "transformers", "pytorch", "tensorboard", "distilbert", "question-answering", "generated_from_trainer", "dataset:becasv2", "license:apache-2.0", "endpoints_compatible", "region:us" ]
question-answering
2022-07-07T03:55:54Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - becasv2 model-index: - name: distilbert-base-uncased-becasv2-3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-becasv2-3 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the becasv2 dataset. It achieves the following results on the evaluation set: - Loss: 3.1218 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 10 - eval_batch_size: 10 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 9 | 4.6377 | | No log | 2.0 | 18 | 3.8511 | | No log | 3.0 | 27 | 3.3758 | | No log | 4.0 | 36 | 3.1910 | | No log | 5.0 | 45 | 3.1187 | | No log | 6.0 | 54 | 3.1009 | | No log | 7.0 | 63 | 3.1131 | | No log | 8.0 | 72 | 3.1218 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
Evelyn18/distilbert-base-uncased-becasv2-2
Evelyn18
2022-07-07T03:47:53Z
3
0
transformers
[ "transformers", "pytorch", "tensorboard", "distilbert", "question-answering", "generated_from_trainer", "dataset:becasv2", "license:apache-2.0", "endpoints_compatible", "region:us" ]
question-answering
2022-07-07T03:43:16Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - becasv2 model-index: - name: distilbert-base-uncased-becasv2-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-becasv2-2 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the becasv2 dataset. It achieves the following results on the evaluation set: - Loss: 2.9170 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 10 - eval_batch_size: 10 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 9 | 4.8334 | | No log | 2.0 | 18 | 3.9395 | | No log | 3.0 | 27 | 3.4886 | | No log | 4.0 | 36 | 3.2190 | | No log | 5.0 | 45 | 3.0781 | | No log | 6.0 | 54 | 2.9878 | | No log | 7.0 | 63 | 2.9336 | | No log | 8.0 | 72 | 2.9170 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
Evelyn18/distilbert-base-uncased-becasv2-1
Evelyn18
2022-07-07T03:38:53Z
3
0
transformers
[ "transformers", "pytorch", "tensorboard", "distilbert", "question-answering", "generated_from_trainer", "dataset:becasv2", "license:apache-2.0", "endpoints_compatible", "region:us" ]
question-answering
2022-07-07T03:34:17Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - becasv2 model-index: - name: distilbert-base-uncased-becasv2-1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-becasv2-1 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the becasv2 dataset. It achieves the following results on the evaluation set: - Loss: 2.9472 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 10 - eval_batch_size: 10 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 9 | 4.6722 | | No log | 2.0 | 18 | 3.9450 | | No log | 3.0 | 27 | 3.4890 | | No log | 4.0 | 36 | 3.2251 | | No log | 5.0 | 45 | 2.9906 | | No log | 6.0 | 54 | 3.0790 | | No log | 7.0 | 63 | 2.8791 | | No log | 8.0 | 72 | 2.9654 | | No log | 9.0 | 81 | 2.9460 | | No log | 10.0 | 90 | 2.9472 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
mikesong724/deberta-wiki-2010
mikesong724
2022-07-07T03:29:19Z
5
0
transformers
[ "transformers", "pytorch", "deberta", "fill-mask", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
2022-07-07T03:19:23Z
DeBERTa trained from scratch, continuing training from https://huggingface.co/mikesong724/deberta-wiki-2006

Source data: https://dumps.wikimedia.org/archive/2010/

Tools used: https://github.com/mikesong724/Point-in-Time-Language-Model

2010 wiki archive (6.1 GB), trained for 18 epochs = 108 GB, on top of the 2006 run (65 GB)

GLUE benchmark:
- cola (3e): Matthews corr: 0.3640
- sst2 (3e): acc: 0.9106
- mrpc (5e): F1: 0.8505, acc: 0.7794
- stsb (3e): Pearson: 0.8339, Spearman: 0.8312
- qqp (3e): acc: 0.8965, F1: 0.8604
- mnli (3e): acc_mm: 0.8023
- qnli (3e): acc: 0.8889
- rte (3e): acc: 0.5271
- wnli (5e): acc: 0.3380
ChauNguyen23/distilbert-base-uncased-finetuned-imdb
ChauNguyen23
2022-07-07T02:54:46Z
3
0
transformers
[ "transformers", "pytorch", "tensorboard", "distilbert", "fill-mask", "generated_from_trainer", "dataset:imdb", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
2022-07-07T02:48:22Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb model-index: - name: distilbert-base-uncased-finetuned-imdb results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-imdb This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 2.4721 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.7086 | 1.0 | 157 | 2.4897 | | 2.5796 | 2.0 | 314 | 2.4230 | | 2.5269 | 3.0 | 471 | 2.4354 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
huggingtweets/joviex
huggingtweets
2022-07-07T01:05:09Z
3
0
transformers
[ "transformers", "pytorch", "gpt2", "text-generation", "huggingtweets", "en", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2022-07-07T01:03:44Z
--- language: en thumbnail: http://www.huggingtweets.com/joviex/1657155904240/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1481464434123894785/YmWpO9TE_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">lɐǝɹ sı ǝʌıʇɔǝdsɹǝd</div> <div style="text-align: center; font-size: 14px;">@joviex</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from lɐǝɹ sı ǝʌıʇɔǝdsɹǝd. | Data | lɐǝɹ sı ǝʌıʇɔǝdsɹǝd | | --- | --- | | Tweets downloaded | 3248 | | Retweets | 36 | | Short tweets | 259 | | Tweets kept | 2953 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/2xrk357z/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @joviex's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/25r2lx70) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/25r2lx70/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/joviex') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
taln-ls2n/POET
taln-ls2n
2022-07-06T23:49:35Z
4
2
transformers
[ "transformers", "pytorch", "camembert", "token-classification", "Transformers", "sequence-tagger-model", "fr", "dataset:qanastek/ANTILLES", "arxiv:1911.03894", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2022-05-11T09:33:05Z
--- tags: - Transformers - token-classification - sequence-tagger-model language: fr datasets: - qanastek/ANTILLES widget: - text: "George Washington est allé à Washington" --- # POET: A French Extended Part-of-Speech Tagger - Corpora: [ANTILLES](https://github.com/qanastek/ANTILLES) - Embeddings & Sequence Labelling: [CamemBERT](https://arxiv.org/abs/1911.03894) - Number of Epochs: 115 **People Involved** * [LABRAK Yanis](https://www.linkedin.com/in/yanis-labrak-8a7412145/) (1) * [DUFOUR Richard](https://cv.archives-ouvertes.fr/richard-dufour) (2) **Affiliations** 1. [LIA, NLP team](https://lia.univ-avignon.fr/), Avignon University, Avignon, France. 2. [LS2N, TALN team](https://www.ls2n.fr/equipe/taln/), Nantes University, Nantes, France. ## Demo: How to use in HuggingFace Transformers Requires [transformers](https://pypi.org/project/transformers/): ```pip install transformers``` ```python from transformers import CamembertTokenizer, CamembertForTokenClassification, TokenClassificationPipeline tokenizer = CamembertTokenizer.from_pretrained('taln-ls2n/POET') model = CamembertForTokenClassification.from_pretrained('taln-ls2n/POET') pos = TokenClassificationPipeline(model=model, tokenizer=tokenizer) def make_prediction(sentence): labels = [l['entity'] for l in pos(sentence)] return list(zip(sentence.split(" "), labels)) res = make_prediction("George Washington est allé à Washington") ``` Output: ![Preview Output](preview.PNG) ## Training data `ANTILLES` is a part-of-speech tagging corpora based on [UD_French-GSD](https://universaldependencies.org/treebanks/fr_gsd/index.html) which was originally created in 2015 and is based on the [universal dependency treebank v2.0](https://github.com/ryanmcd/uni-dep-tb). Originally, the corpora consists of 400,399 words (16,341 sentences) and had 17 different classes. Now, after applying our tags augmentation we obtain 60 different classes which add linguistic and semantic information such as the gender, number, mood, person, tense or verb form given in the different CoNLL-03 fields from the original corpora. We based our tags on the level of details given by the [LIA_TAGG](http://pageperso.lif.univ-mrs.fr/frederic.bechet/download.html) statistical POS tagger written by [Frédéric Béchet](http://pageperso.lif.univ-mrs.fr/frederic.bechet/index-english.html) in 2001. The corpora used for this model is available on [Github](https://github.com/qanastek/ANTILLES) at the [CoNLL-U format](https://universaldependencies.org/format.html). Training data are fed to the model as free language and doesn't pass a normalization phase. Thus, it's made the model case and punctuation sensitive. 
## Original Tags ```plain PRON VERB SCONJ ADP CCONJ DET NOUN ADJ AUX ADV PUNCT PROPN NUM SYM PART X INTJ ``` ## New additional POS tags | Abbreviation | Description | Examples | |:--------:|:--------:|:--------:| | PREP | Preposition | de | | AUX | Auxiliary Verb | est | | ADV | Adverb | toujours | | COSUB | Subordinating conjunction | que | | COCO | Coordinating Conjunction | et | | PART | Demonstrative particle | -t | | PRON | Pronoun | qui ce quoi | | PDEMMS | Demonstrative Pronoun - Singular Masculine | ce | | PDEMMP | Demonstrative Pronoun - Plural Masculine | ceux | | PDEMFS | Demonstrative Pronoun - Singular Feminine | cette | | PDEMFP | Demonstrative Pronoun - Plural Feminine | celles | | PINDMS | Indefinite Pronoun - Singular Masculine | tout | | PINDMP | Indefinite Pronoun - Plural Masculine | autres | | PINDFS | Indefinite Pronoun - Singular Feminine | chacune | | PINDFP | Indefinite Pronoun - Plural Feminine | certaines | | PROPN | Proper noun | Houston | | XFAMIL | Last name | Levy | | NUM | Numerical Adjective | trentaine vingtaine | | DINTMS | Masculine Numerical Adjective | un | | DINTFS | Feminine Numerical Adjective | une | | PPOBJMS | Pronoun complements of objects - Singular Masculine | le lui | | PPOBJMP | Pronoun complements of objects - Plural Masculine | eux y | | PPOBJFS | Pronoun complements of objects - Singular Feminine | moi la | | PPOBJFP | Pronoun complements of objects - Plural Feminine | en y | | PPER1S | Personal Pronoun First-Person - Singular | je | | PPER2S | Personal Pronoun Second-Person - Singular | tu | | PPER3MS | Personal Pronoun Third-Person - Singular Masculine | il | | PPER3MP | Personal Pronoun Third-Person - Plural Masculine | ils | | PPER3FS | Personal Pronoun Third-Person - Singular Feminine | elle | | PPER3FP | Personal Pronoun Third-Person - Plural Feminine | elles | | PREFS | Reflexive Pronoun First-Person - Singular | me m' | | PREF | Reflexive Pronoun Third-Person - Singular | se s' | | PREFP | Reflexive Pronoun First / Second-Person - Plural | nous vous | | VERB | Verb | obtient | | VPPMS | Past Participle - Singular Masculine | formulé | | VPPMP | Past Participle - Plural Masculine | classés | | VPPFS | Past Participle - Singular Feminine | appelée | | VPPFP | Past Participle - Plural Feminine | sanctionnées | | DET | Determinant | les l' | | DETMS | Determinant - Singular Masculine | les | | DETFS | Determinant - Singular Feminine | la | | ADJ | Adjective | capable sérieux | | ADJMS | Adjective - Singular Masculine | grand important | | ADJMP | Adjective - Plural Masculine | grands petits | | ADJFS | Adjective - Singular Feminine | française petite | | ADJFP | Adjective - Plural Feminine | légères petites | | NOUN | Noun | temps | | NMS | Noun - Singular Masculine | drapeau | | NMP | Noun - Plural Masculine | journalistes | | NFS | Noun - Singular Feminine | tête | | NFP | Noun - Plural Feminine | ondes | | PREL | Relative Pronoun | qui dont | | PRELMS | Relative Pronoun - Singular Masculine | lequel | | PRELMP | Relative Pronoun - Plural Masculine | lesquels | | PRELFS | Relative Pronoun - Singular Feminine | laquelle | | PRELFP | Relative Pronoun - Plural Feminine | lesquelles | | INTJ | Interjection | merci bref | | CHIF | Numbers | 1979 10 | | SYM | Symbol | € % | | YPFOR | Endpoint | . 
| | PUNCT | Ponctuation | : , | | MOTINC | Unknown words | Technology Lady | | X | Typos & others | sfeir 3D statu | ## Evaluation results The test corpora used for this evaluation is available on [Github](https://github.com/qanastek/ANTILLES/blob/main/ANTILLES/test.conllu). ```plain precision recall f1-score support ADJ 0.9040 0.8828 0.8933 128 ADJFP 0.9811 0.9585 0.9697 434 ADJFS 0.9606 0.9826 0.9715 918 ADJMP 0.9613 0.9357 0.9483 451 ADJMS 0.9561 0.9611 0.9586 952 ADV 0.9870 0.9948 0.9908 1524 AUX 0.9956 0.9964 0.9960 1124 CHIF 0.9798 0.9774 0.9786 1239 COCO 1.0000 0.9989 0.9994 884 COSUB 0.9939 0.9939 0.9939 328 DET 0.9972 0.9972 0.9972 2897 DETFS 0.9990 1.0000 0.9995 1007 DETMS 1.0000 0.9993 0.9996 1426 DINTFS 0.9967 0.9902 0.9934 306 DINTMS 0.9923 0.9948 0.9935 387 INTJ 0.8000 0.8000 0.8000 5 MOTINC 0.5049 0.5827 0.5410 266 NFP 0.9807 0.9675 0.9740 892 NFS 0.9778 0.9699 0.9738 2588 NMP 0.9687 0.9495 0.9590 1367 NMS 0.9759 0.9560 0.9659 3181 NOUN 0.6164 0.8673 0.7206 113 NUM 0.6250 0.8333 0.7143 6 PART 1.0000 0.9375 0.9677 16 PDEMFP 1.0000 1.0000 1.0000 3 PDEMFS 1.0000 1.0000 1.0000 89 PDEMMP 1.0000 1.0000 1.0000 20 PDEMMS 1.0000 1.0000 1.0000 222 PINDFP 1.0000 1.0000 1.0000 3 PINDFS 0.8571 1.0000 0.9231 12 PINDMP 0.9000 1.0000 0.9474 9 PINDMS 0.9286 0.9701 0.9489 67 PINTFS 0.0000 0.0000 0.0000 2 PPER1S 1.0000 1.0000 1.0000 62 PPER2S 0.7500 1.0000 0.8571 3 PPER3FP 1.0000 1.0000 1.0000 9 PPER3FS 1.0000 1.0000 1.0000 96 PPER3MP 1.0000 1.0000 1.0000 31 PPER3MS 1.0000 1.0000 1.0000 377 PPOBJFP 1.0000 0.7500 0.8571 4 PPOBJFS 0.9167 0.8919 0.9041 37 PPOBJMP 0.7500 0.7500 0.7500 12 PPOBJMS 0.9371 0.9640 0.9504 139 PREF 1.0000 1.0000 1.0000 332 PREFP 1.0000 1.0000 1.0000 64 PREFS 1.0000 1.0000 1.0000 13 PREL 0.9964 0.9964 0.9964 277 PRELFP 1.0000 1.0000 1.0000 5 PRELFS 0.8000 1.0000 0.8889 4 PRELMP 1.0000 1.0000 1.0000 3 PRELMS 1.0000 1.0000 1.0000 11 PREP 0.9971 0.9977 0.9974 6161 PRON 0.9836 0.9836 0.9836 61 PROPN 0.9468 0.9503 0.9486 4310 PUNCT 1.0000 1.0000 1.0000 4019 SYM 0.9394 0.8158 0.8732 76 VERB 0.9956 0.9921 0.9938 2273 VPPFP 0.9145 0.9469 0.9304 113 VPPFS 0.9562 0.9597 0.9580 273 VPPMP 0.8827 0.9728 0.9256 147 VPPMS 0.9778 0.9794 0.9786 630 VPPRE 0.0000 0.0000 0.0000 1 X 0.9604 0.9935 0.9766 1073 XFAMIL 0.9386 0.9113 0.9248 1342 YPFOR 1.0000 1.0000 1.0000 2750 accuracy 0.9778 47574 macro avg 0.9151 0.9285 0.9202 47574 weighted avg 0.9785 0.9778 0.9780 47574 ``` ## BibTeX Citations Please cite the following paper when using this model. 
ANTILLES corpus and POET taggers: ```latex @inproceedings{labrak:hal-03696042, TITLE = {{ANTILLES: An Open French Linguistically Enriched Part-of-Speech Corpus}}, AUTHOR = {Labrak, Yanis and Dufour, Richard}, URL = {https://hal.archives-ouvertes.fr/hal-03696042}, BOOKTITLE = {{25th International Conference on Text, Speech and Dialogue (TSD)}}, ADDRESS = {Brno, Czech Republic}, PUBLISHER = {{Springer}}, YEAR = {2022}, MONTH = Sep, KEYWORDS = {Part-of-speech corpus ; POS tagging ; Open tools ; Word embeddings ; Bi-LSTM ; CRF ; Transformers}, PDF = {https://hal.archives-ouvertes.fr/hal-03696042/file/ANTILLES_A_freNch_linguisTIcaLLy_Enriched_part_of_Speech_corpus.pdf}, HAL_ID = {hal-03696042}, HAL_VERSION = {v1}, } ``` UD_French-GSD corpora: ```latex @misc{ universaldependencies, title={UniversalDependencies/UD_French-GSD}, url={https://github.com/UniversalDependencies/UD_French-GSD}, journal={GitHub}, author={UniversalDependencies} } ``` LIA TAGG: ```latex @techreport{LIA_TAGG, author = {Frédéric Béchet}, title = {LIA_TAGG: a statistical POS tagger + syntactic bracketer}, institution = {Aix-Marseille University & CNRS}, year = {2001} } ``` Flair Embeddings: ```latex @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ``` ## Acknowledgment This work was financially supported by [Zenidoc](https://zenidoc.fr/) and the [ANR project DIETS](https://anr-diets.univ-avignon.fr) under the contract [ANR-20-CE23-0005](https://anr.fr/en/funded-projects-and-impact/funded-projects/project/funded/project/b2d9d3668f92a3b9fbbf7866072501ef-fd7e69d902/?tx_anrprojects_funded%5Bcontroller%5D=Funded&cHash=cb6d54d24c9e21e0d50fabf46bd56646).
qanastek/pos-french-camembert-flair
qanastek
2022-07-06T23:49:12Z
52
3
flair
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "fr", "dataset:qanastek/ANTILLES", "arxiv:1911.03894", "arxiv:1011.4088", "region:us" ]
token-classification
2022-03-02T23:29:05Z
--- tags: - flair - token-classification - sequence-tagger-model language: fr datasets: - qanastek/ANTILLES widget: - text: "George Washington est allé à Washington" --- # POET: A French Extended Part-of-Speech Tagger - Corpora: [ANTILLES](https://github.com/qanastek/ANTILLES) - Embeddings: [Flair](https://aclanthology.org/C18-1139.pdf) & [CamemBERT](https://arxiv.org/abs/1911.03894) - Sequence Labelling: [Bi-LSTM-CRF](https://arxiv.org/abs/1011.4088) - Number of Epochs: 50 **People Involved** * [LABRAK Yanis](https://www.linkedin.com/in/yanis-labrak-8a7412145/) (1) * [DUFOUR Richard](https://cv.archives-ouvertes.fr/richard-dufour) (2) **Affiliations** 1. [LIA, NLP team](https://lia.univ-avignon.fr/), Avignon University, Avignon, France. 2. [LS2N, TALN team](https://www.ls2n.fr/equipe/taln/), Nantes University, Nantes, France. ## Demo: How to use in Flair Requires [Flair](https://pypi.org/project/flair/): ```pip install flair``` ```python from flair.data import Sentence from flair.models import SequenceTagger # Load the model model = SequenceTagger.load("qanastek/pos-french") sentence = Sentence("George Washington est allé à Washington") # predict tags model.predict(sentence) # print predicted pos tags print(sentence.to_tagged_string()) ``` Output: ![Preview Output](preview.PNG) ## Training data `ANTILLES` is a part-of-speech tagging corpora based on [UD_French-GSD](https://universaldependencies.org/treebanks/fr_gsd/index.html) which was originally created in 2015 and is based on the [universal dependency treebank v2.0](https://github.com/ryanmcd/uni-dep-tb). Originally, the corpora consists of 400,399 words (16,341 sentences) and had 17 different classes. Now, after applying our tags augmentation we obtain 60 different classes which add linguistic and semantic information such as the gender, number, mood, person, tense or verb form given in the different CoNLL-03 fields from the original corpora. We based our tags on the level of details given by the [LIA_TAGG](http://pageperso.lif.univ-mrs.fr/frederic.bechet/download.html) statistical POS tagger written by [Frédéric Béchet](http://pageperso.lif.univ-mrs.fr/frederic.bechet/index-english.html) in 2001. The corpora used for this model is available on [Github](https://github.com/qanastek/ANTILLES) at the [CoNLL-U format](https://universaldependencies.org/format.html). Training data are fed to the model as free language and doesn't pass a normalization phase. Thus, it's made the model case and punctuation sensitive. 
## Original Tags ```plain PRON VERB SCONJ ADP CCONJ DET NOUN ADJ AUX ADV PUNCT PROPN NUM SYM PART X INTJ ``` ## New additional POS tags | Abbreviation | Description | Examples | |:--------:|:--------:|:--------:| | PREP | Preposition | de | | AUX | Auxiliary Verb | est | | ADV | Adverb | toujours | | COSUB | Subordinating conjunction | que | | COCO | Coordinating Conjunction | et | | PART | Demonstrative particle | -t | | PRON | Pronoun | qui ce quoi | | PDEMMS | Demonstrative Pronoun - Singular Masculine | ce | | PDEMMP | Demonstrative Pronoun - Plural Masculine | ceux | | PDEMFS | Demonstrative Pronoun - Singular Feminine | cette | | PDEMFP | Demonstrative Pronoun - Plural Feminine | celles | | PINDMS | Indefinite Pronoun - Singular Masculine | tout | | PINDMP | Indefinite Pronoun - Plural Masculine | autres | | PINDFS | Indefinite Pronoun - Singular Feminine | chacune | | PINDFP | Indefinite Pronoun - Plural Feminine | certaines | | PROPN | Proper noun | Houston | | XFAMIL | Last name | Levy | | NUM | Numerical Adjective | trentaine vingtaine | | DINTMS | Masculine Numerical Adjective | un | | DINTFS | Feminine Numerical Adjective | une | | PPOBJMS | Pronoun complements of objects - Singular Masculine | le lui | | PPOBJMP | Pronoun complements of objects - Plural Masculine | eux y | | PPOBJFS | Pronoun complements of objects - Singular Feminine | moi la | | PPOBJFP | Pronoun complements of objects - Plural Feminine | en y | | PPER1S | Personal Pronoun First-Person - Singular | je | | PPER2S | Personal Pronoun Second-Person - Singular | tu | | PPER3MS | Personal Pronoun Third-Person - Singular Masculine | il | | PPER3MP | Personal Pronoun Third-Person - Plural Masculine | ils | | PPER3FS | Personal Pronoun Third-Person - Singular Feminine | elle | | PPER3FP | Personal Pronoun Third-Person - Plural Feminine | elles | | PREFS | Reflexive Pronoun First-Person - Singular | me m' | | PREF | Reflexive Pronoun Third-Person - Singular | se s' | | PREFP | Reflexive Pronoun First / Second-Person - Plural | nous vous | | VERB | Verb | obtient | | VPPMS | Past Participle - Singular Masculine | formulé | | VPPMP | Past Participle - Plural Masculine | classés | | VPPFS | Past Participle - Singular Feminine | appelée | | VPPFP | Past Participle - Plural Feminine | sanctionnées | | DET | Determinant | les l' | | DETMS | Determinant - Singular Masculine | les | | DETFS | Determinant - Singular Feminine | la | | ADJ | Adjective | capable sérieux | | ADJMS | Adjective - Singular Masculine | grand important | | ADJMP | Adjective - Plural Masculine | grands petits | | ADJFS | Adjective - Singular Feminine | française petite | | ADJFP | Adjective - Plural Feminine | légères petites | | NOUN | Noun | temps | | NMS | Noun - Singular Masculine | drapeau | | NMP | Noun - Plural Masculine | journalistes | | NFS | Noun - Singular Feminine | tête | | NFP | Noun - Plural Feminine | ondes | | PREL | Relative Pronoun | qui dont | | PRELMS | Relative Pronoun - Singular Masculine | lequel | | PRELMP | Relative Pronoun - Plural Masculine | lesquels | | PRELFS | Relative Pronoun - Singular Feminine | laquelle | | PRELFP | Relative Pronoun - Plural Feminine | lesquelles | | INTJ | Interjection | merci bref | | CHIF | Numbers | 1979 10 | | SYM | Symbol | € % | | YPFOR | Endpoint | . 
| | PUNCT | Ponctuation | : , | | MOTINC | Unknown words | Technology Lady | | X | Typos & others | sfeir 3D statu | ## Evaluation results The test corpora used for this evaluation is available on [Github](https://github.com/qanastek/ANTILLES/blob/main/ANTILLES/test.conllu). ```plain Results: - F-score (micro) 0.9797 - F-score (macro) 0.9178 - Accuracy 0.9797 By class: precision recall f1-score support PREP 0.9966 0.9987 0.9976 1483 PUNCT 1.0000 1.0000 1.0000 833 NMS 0.9634 0.9801 0.9717 753 DET 0.9923 0.9984 0.9954 645 VERB 0.9913 0.9811 0.9862 583 NFS 0.9667 0.9839 0.9752 560 ADV 0.9940 0.9821 0.9880 504 PROPN 0.9541 0.8937 0.9229 395 DETMS 1.0000 1.0000 1.0000 362 AUX 0.9860 0.9915 0.9888 355 YPFOR 1.0000 1.0000 1.0000 353 NMP 0.9666 0.9475 0.9570 305 COCO 0.9959 1.0000 0.9980 245 ADJMS 0.9463 0.9385 0.9424 244 DETFS 1.0000 1.0000 1.0000 240 CHIF 0.9648 0.9865 0.9755 222 NFP 0.9515 0.9849 0.9679 199 ADJFS 0.9657 0.9286 0.9468 182 VPPMS 0.9387 0.9745 0.9563 157 COSUB 1.0000 0.9844 0.9921 128 DINTMS 0.9918 0.9918 0.9918 122 XFAMIL 0.9298 0.9217 0.9258 115 PPER3MS 1.0000 1.0000 1.0000 87 ADJMP 0.9294 0.9634 0.9461 82 PDEMMS 1.0000 1.0000 1.0000 75 ADJFP 0.9861 0.9342 0.9595 76 PREL 0.9859 1.0000 0.9929 70 DINTFS 0.9839 1.0000 0.9919 61 PREF 1.0000 1.0000 1.0000 52 PPOBJMS 0.9565 0.9362 0.9462 47 PREFP 0.9778 1.0000 0.9888 44 PINDMS 1.0000 0.9773 0.9885 44 VPPFS 0.8298 0.9750 0.8966 40 PPER1S 1.0000 1.0000 1.0000 42 SYM 1.0000 0.9474 0.9730 38 NOUN 0.8824 0.7692 0.8219 39 PRON 1.0000 0.9677 0.9836 31 PDEMFS 1.0000 1.0000 1.0000 29 VPPMP 0.9286 1.0000 0.9630 26 ADJ 0.9524 0.9091 0.9302 22 PPER3MP 1.0000 1.0000 1.0000 20 VPPFP 1.0000 1.0000 1.0000 19 PPER3FS 1.0000 1.0000 1.0000 18 MOTINC 0.3333 0.4000 0.3636 15 PREFS 1.0000 1.0000 1.0000 10 PPOBJMP 1.0000 0.8000 0.8889 10 PPOBJFS 0.6250 0.8333 0.7143 6 INTJ 0.5000 0.6667 0.5714 6 PART 1.0000 1.0000 1.0000 4 PDEMMP 1.0000 1.0000 1.0000 3 PDEMFP 1.0000 1.0000 1.0000 3 PPER3FP 1.0000 1.0000 1.0000 2 NUM 1.0000 0.3333 0.5000 3 PPER2S 1.0000 1.0000 1.0000 2 PPOBJFP 0.5000 0.5000 0.5000 2 PRELMS 1.0000 1.0000 1.0000 2 PINDFS 0.5000 1.0000 0.6667 1 PINDMP 1.0000 1.0000 1.0000 1 X 0.0000 0.0000 0.0000 1 PINDFP 1.0000 1.0000 1.0000 1 micro avg 0.9797 0.9797 0.9797 10019 macro avg 0.9228 0.9230 0.9178 10019 weighted avg 0.9802 0.9797 0.9798 10019 samples avg 0.9797 0.9797 0.9797 10019 ``` ## BibTeX Citations Please cite the following paper when using this model. 
ANTILLES corpus and POET taggers: ```latex @inproceedings{labrak:hal-03696042, TITLE = {{ANTILLES: An Open French Linguistically Enriched Part-of-Speech Corpus}}, AUTHOR = {Labrak, Yanis and Dufour, Richard}, URL = {https://hal.archives-ouvertes.fr/hal-03696042}, BOOKTITLE = {{25th International Conference on Text, Speech and Dialogue (TSD)}}, ADDRESS = {Brno, Czech Republic}, PUBLISHER = {{Springer}}, YEAR = {2022}, MONTH = Sep, KEYWORDS = {Part-of-speech corpus ; POS tagging ; Open tools ; Word embeddings ; Bi-LSTM ; CRF ; Transformers}, PDF = {https://hal.archives-ouvertes.fr/hal-03696042/file/ANTILLES_A_freNch_linguisTIcaLLy_Enriched_part_of_Speech_corpus.pdf}, HAL_ID = {hal-03696042}, HAL_VERSION = {v1}, } ``` UD_French-GSD corpora: ```latex @misc{ universaldependencies, title={UniversalDependencies/UD_French-GSD}, url={https://github.com/UniversalDependencies/UD_French-GSD}, journal={GitHub}, author={UniversalDependencies} } ``` LIA TAGG: ```latex @techreport{LIA_TAGG, author = {Frédéric Béchet}, title = {LIA_TAGG: a statistical POS tagger + syntactic bracketer}, institution = {Aix-Marseille University & CNRS}, year = {2001} } ``` Flair Embeddings: ```latex @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ``` ## Acknowledgment This work was financially supported by [Zenidoc](https://zenidoc.fr/)
qanastek/pos-french-camembert
qanastek
2022-07-06T23:48:53Z
19
9
transformers
[ "transformers", "pytorch", "camembert", "token-classification", "Transformers", "sequence-tagger-model", "fr", "dataset:qanastek/ANTILLES", "arxiv:1911.03894", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2022-03-02T23:29:05Z
---
tags:
- Transformers
- token-classification
- sequence-tagger-model
language: fr
datasets:
- qanastek/ANTILLES
widget:
- text: "George Washington est allé à Washington"
---

# POET: A French Extended Part-of-Speech Tagger

- Corpus: [ANTILLES](https://github.com/qanastek/ANTILLES)
- Embeddings & Sequence Labelling: [CamemBERT](https://arxiv.org/abs/1911.03894)
- Number of Epochs: 115

**People Involved**

* [LABRAK Yanis](https://www.linkedin.com/in/yanis-labrak-8a7412145/) (1)
* [DUFOUR Richard](https://cv.archives-ouvertes.fr/richard-dufour) (2)

**Affiliations**

1. [LIA, NLP team](https://lia.univ-avignon.fr/), Avignon University, Avignon, France.
2. [LS2N, TALN team](https://www.ls2n.fr/equipe/taln/), Nantes University, Nantes, France.

## Demo: How to use in HuggingFace Transformers

Requires [transformers](https://pypi.org/project/transformers/): ```pip install transformers```

```python
from transformers import CamembertTokenizer, CamembertForTokenClassification, TokenClassificationPipeline

tokenizer = CamembertTokenizer.from_pretrained('qanastek/pos-french-camembert')
model = CamembertForTokenClassification.from_pretrained('qanastek/pos-french-camembert')
pos = TokenClassificationPipeline(model=model, tokenizer=tokenizer)

def make_prediction(sentence):
    # One label per pipeline token; words that CamemBERT splits into several
    # sub-word tokens will not align one-to-one with the whitespace split below.
    labels = [l['entity'] for l in pos(sentence)]
    return list(zip(sentence.split(" "), labels))

res = make_prediction("George Washington est allé à Washington")
```

Output:

![Preview Output](preview.PNG)

## Training data

`ANTILLES` is a part-of-speech tagging corpus based on [UD_French-GSD](https://universaldependencies.org/treebanks/fr_gsd/index.html), which was originally created in 2015 and is itself based on the [universal dependency treebank v2.0](https://github.com/ryanmcd/uni-dep-tb).

Originally, the corpus consisted of 400,399 words (16,341 sentences) annotated with 17 different classes. After applying our tag augmentation, we obtain 60 different classes, which add linguistic and semantic information such as gender, number, mood, person, tense or verb form, given in the CoNLL-U fields of the original corpus. We based our tags on the level of detail given by the [LIA_TAGG](http://pageperso.lif.univ-mrs.fr/frederic.bechet/download.html) statistical POS tagger written by [Frédéric Béchet](http://pageperso.lif.univ-mrs.fr/frederic.bechet/index-english.html) in 2001.

The corpus used for this model is available on [Github](https://github.com/qanastek/ANTILLES) in the [CoNLL-U format](https://universaldependencies.org/format.html). Training data are fed to the model as raw text, without any normalization step, which makes the model case- and punctuation-sensitive.
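A note on the demo above: `make_prediction` pairs whitespace-split words with one label per pipeline token, which only lines up when every word maps to a single sub-word token. A hedged alternative, assuming a transformers release recent enough to support `aggregation_strategy`, lets the pipeline regroup sub-words itself:

```python
from transformers import pipeline

pos = pipeline(
    "token-classification",
    model="qanastek/pos-french-camembert",
    aggregation_strategy="simple",  # merge sub-word pieces back into words
)

for item in pos("George Washington est allé à Washington"):
    print(item["word"], item["entity_group"], round(item["score"], 3))
```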
## Original Tags ```plain PRON VERB SCONJ ADP CCONJ DET NOUN ADJ AUX ADV PUNCT PROPN NUM SYM PART X INTJ ``` ## New additional POS tags | Abbreviation | Description | Examples | |:--------:|:--------:|:--------:| | PREP | Preposition | de | | AUX | Auxiliary Verb | est | | ADV | Adverb | toujours | | COSUB | Subordinating conjunction | que | | COCO | Coordinating Conjunction | et | | PART | Demonstrative particle | -t | | PRON | Pronoun | qui ce quoi | | PDEMMS | Demonstrative Pronoun - Singular Masculine | ce | | PDEMMP | Demonstrative Pronoun - Plural Masculine | ceux | | PDEMFS | Demonstrative Pronoun - Singular Feminine | cette | | PDEMFP | Demonstrative Pronoun - Plural Feminine | celles | | PINDMS | Indefinite Pronoun - Singular Masculine | tout | | PINDMP | Indefinite Pronoun - Plural Masculine | autres | | PINDFS | Indefinite Pronoun - Singular Feminine | chacune | | PINDFP | Indefinite Pronoun - Plural Feminine | certaines | | PROPN | Proper noun | Houston | | XFAMIL | Last name | Levy | | NUM | Numerical Adjective | trentaine vingtaine | | DINTMS | Masculine Numerical Adjective | un | | DINTFS | Feminine Numerical Adjective | une | | PPOBJMS | Pronoun complements of objects - Singular Masculine | le lui | | PPOBJMP | Pronoun complements of objects - Plural Masculine | eux y | | PPOBJFS | Pronoun complements of objects - Singular Feminine | moi la | | PPOBJFP | Pronoun complements of objects - Plural Feminine | en y | | PPER1S | Personal Pronoun First-Person - Singular | je | | PPER2S | Personal Pronoun Second-Person - Singular | tu | | PPER3MS | Personal Pronoun Third-Person - Singular Masculine | il | | PPER3MP | Personal Pronoun Third-Person - Plural Masculine | ils | | PPER3FS | Personal Pronoun Third-Person - Singular Feminine | elle | | PPER3FP | Personal Pronoun Third-Person - Plural Feminine | elles | | PREFS | Reflexive Pronoun First-Person - Singular | me m' | | PREF | Reflexive Pronoun Third-Person - Singular | se s' | | PREFP | Reflexive Pronoun First / Second-Person - Plural | nous vous | | VERB | Verb | obtient | | VPPMS | Past Participle - Singular Masculine | formulé | | VPPMP | Past Participle - Plural Masculine | classés | | VPPFS | Past Participle - Singular Feminine | appelée | | VPPFP | Past Participle - Plural Feminine | sanctionnées | | DET | Determinant | les l' | | DETMS | Determinant - Singular Masculine | les | | DETFS | Determinant - Singular Feminine | la | | ADJ | Adjective | capable sérieux | | ADJMS | Adjective - Singular Masculine | grand important | | ADJMP | Adjective - Plural Masculine | grands petits | | ADJFS | Adjective - Singular Feminine | française petite | | ADJFP | Adjective - Plural Feminine | légères petites | | NOUN | Noun | temps | | NMS | Noun - Singular Masculine | drapeau | | NMP | Noun - Plural Masculine | journalistes | | NFS | Noun - Singular Feminine | tête | | NFP | Noun - Plural Feminine | ondes | | PREL | Relative Pronoun | qui dont | | PRELMS | Relative Pronoun - Singular Masculine | lequel | | PRELMP | Relative Pronoun - Plural Masculine | lesquels | | PRELFS | Relative Pronoun - Singular Feminine | laquelle | | PRELFP | Relative Pronoun - Plural Feminine | lesquelles | | INTJ | Interjection | merci bref | | CHIF | Numbers | 1979 10 | | SYM | Symbol | € % | | YPFOR | Endpoint | . 
| | PUNCT | Ponctuation | : , | | MOTINC | Unknown words | Technology Lady | | X | Typos & others | sfeir 3D statu | ## Evaluation results The test corpora used for this evaluation is available on [Github](https://github.com/qanastek/ANTILLES/blob/main/ANTILLES/test.conllu). ```plain precision recall f1-score support ADJ 0.9040 0.8828 0.8933 128 ADJFP 0.9811 0.9585 0.9697 434 ADJFS 0.9606 0.9826 0.9715 918 ADJMP 0.9613 0.9357 0.9483 451 ADJMS 0.9561 0.9611 0.9586 952 ADV 0.9870 0.9948 0.9908 1524 AUX 0.9956 0.9964 0.9960 1124 CHIF 0.9798 0.9774 0.9786 1239 COCO 1.0000 0.9989 0.9994 884 COSUB 0.9939 0.9939 0.9939 328 DET 0.9972 0.9972 0.9972 2897 DETFS 0.9990 1.0000 0.9995 1007 DETMS 1.0000 0.9993 0.9996 1426 DINTFS 0.9967 0.9902 0.9934 306 DINTMS 0.9923 0.9948 0.9935 387 INTJ 0.8000 0.8000 0.8000 5 MOTINC 0.5049 0.5827 0.5410 266 NFP 0.9807 0.9675 0.9740 892 NFS 0.9778 0.9699 0.9738 2588 NMP 0.9687 0.9495 0.9590 1367 NMS 0.9759 0.9560 0.9659 3181 NOUN 0.6164 0.8673 0.7206 113 NUM 0.6250 0.8333 0.7143 6 PART 1.0000 0.9375 0.9677 16 PDEMFP 1.0000 1.0000 1.0000 3 PDEMFS 1.0000 1.0000 1.0000 89 PDEMMP 1.0000 1.0000 1.0000 20 PDEMMS 1.0000 1.0000 1.0000 222 PINDFP 1.0000 1.0000 1.0000 3 PINDFS 0.8571 1.0000 0.9231 12 PINDMP 0.9000 1.0000 0.9474 9 PINDMS 0.9286 0.9701 0.9489 67 PINTFS 0.0000 0.0000 0.0000 2 PPER1S 1.0000 1.0000 1.0000 62 PPER2S 0.7500 1.0000 0.8571 3 PPER3FP 1.0000 1.0000 1.0000 9 PPER3FS 1.0000 1.0000 1.0000 96 PPER3MP 1.0000 1.0000 1.0000 31 PPER3MS 1.0000 1.0000 1.0000 377 PPOBJFP 1.0000 0.7500 0.8571 4 PPOBJFS 0.9167 0.8919 0.9041 37 PPOBJMP 0.7500 0.7500 0.7500 12 PPOBJMS 0.9371 0.9640 0.9504 139 PREF 1.0000 1.0000 1.0000 332 PREFP 1.0000 1.0000 1.0000 64 PREFS 1.0000 1.0000 1.0000 13 PREL 0.9964 0.9964 0.9964 277 PRELFP 1.0000 1.0000 1.0000 5 PRELFS 0.8000 1.0000 0.8889 4 PRELMP 1.0000 1.0000 1.0000 3 PRELMS 1.0000 1.0000 1.0000 11 PREP 0.9971 0.9977 0.9974 6161 PRON 0.9836 0.9836 0.9836 61 PROPN 0.9468 0.9503 0.9486 4310 PUNCT 1.0000 1.0000 1.0000 4019 SYM 0.9394 0.8158 0.8732 76 VERB 0.9956 0.9921 0.9938 2273 VPPFP 0.9145 0.9469 0.9304 113 VPPFS 0.9562 0.9597 0.9580 273 VPPMP 0.8827 0.9728 0.9256 147 VPPMS 0.9778 0.9794 0.9786 630 VPPRE 0.0000 0.0000 0.0000 1 X 0.9604 0.9935 0.9766 1073 XFAMIL 0.9386 0.9113 0.9248 1342 YPFOR 1.0000 1.0000 1.0000 2750 accuracy 0.9778 47574 macro avg 0.9151 0.9285 0.9202 47574 weighted avg 0.9785 0.9778 0.9780 47574 ``` ## BibTeX Citations Please cite the following paper when using this model. 
ANTILLES corpus and POET taggers: ```latex @inproceedings{labrak:hal-03696042, TITLE = {{ANTILLES: An Open French Linguistically Enriched Part-of-Speech Corpus}}, AUTHOR = {Labrak, Yanis and Dufour, Richard}, URL = {https://hal.archives-ouvertes.fr/hal-03696042}, BOOKTITLE = {{25th International Conference on Text, Speech and Dialogue (TSD)}}, ADDRESS = {Brno, Czech Republic}, PUBLISHER = {{Springer}}, YEAR = {2022}, MONTH = Sep, KEYWORDS = {Part-of-speech corpus ; POS tagging ; Open tools ; Word embeddings ; Bi-LSTM ; CRF ; Transformers}, PDF = {https://hal.archives-ouvertes.fr/hal-03696042/file/ANTILLES_A_freNch_linguisTIcaLLy_Enriched_part_of_Speech_corpus.pdf}, HAL_ID = {hal-03696042}, HAL_VERSION = {v1}, } ``` UD_French-GSD corpora: ```latex @misc{ universaldependencies, title={UniversalDependencies/UD_French-GSD}, url={https://github.com/UniversalDependencies/UD_French-GSD}, journal={GitHub}, author={UniversalDependencies} } ``` LIA TAGG: ```latex @techreport{LIA_TAGG, author = {Frédéric Béchet}, title = {LIA_TAGG: a statistical POS tagger + syntactic bracketer}, institution = {Aix-Marseille University & CNRS}, year = {2001} } ``` Flair Embeddings: ```latex @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ``` ## Acknowledgment This work was financially supported by [Zenidoc](https://zenidoc.fr/)
BigTimeCoderSean/q-FrozenLake-v1-4x4-noSlippery
BigTimeCoderSean
2022-07-06T17:57:12Z
0
0
null
[ "FrozenLake-v1-4x4", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-07-06T17:57:05Z
--- tags: - FrozenLake-v1-4x4 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - metrics: - type: mean_reward value: 0.74 +/- 0.44 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4 type: FrozenLake-v1-4x4 --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1** . ## Usage ```python model = load_from_hub(repo_id="BigTimeCoderSean/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
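The snippet above assumes `load_from_hub`, `gym`, and `evaluate_agent` are already defined (they come from the Deep RL course notebook). A minimal sketch of the loader part, assuming the model really is stored as a pickled dict named `q-learning.pkl` in this repository:

```python
# A sketch of a load_from_hub helper (this repo does not define one itself):
# download the pickled dict (Q-table plus evaluation settings) and unpickle it.
import pickle

from huggingface_hub import hf_hub_download


def load_from_hub(repo_id: str, filename: str) -> dict:
    path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(path, "rb") as f:
        return pickle.load(f)
```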
huggingtweets/zanza47
huggingtweets
2022-07-06T16:45:17Z
3
0
transformers
[ "transformers", "pytorch", "gpt2", "text-generation", "huggingtweets", "en", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2022-07-06T16:21:32Z
--- language: en thumbnail: http://www.huggingtweets.com/zanza47/1657125860989/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1312214716941393920/sX37K0us_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Detective Zanza (Commissions! 1/3 full)</div> <div style="text-align: center; font-size: 14px;">@zanza47</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Detective Zanza (Commissions! 1/3 full). | Data | Detective Zanza (Commissions! 1/3 full) | | --- | --- | | Tweets downloaded | 3242 | | Retweets | 1157 | | Short tweets | 284 | | Tweets kept | 1801 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/383lput2/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @zanza47's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/dipzmx4r) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/dipzmx4r/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/zanza47') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
hsohn3/mayo-bert-visit-uncased-wordlevel-block512-batch4-ep10
hsohn3
2022-07-06T15:57:53Z
4
0
transformers
[ "transformers", "tf", "bert", "fill-mask", "generated_from_keras_callback", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
2022-07-06T14:22:52Z
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: hsohn3/mayo-bert-visit-uncased-wordlevel-block512-batch4-ep10 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # hsohn3/mayo-bert-visit-uncased-wordlevel-block512-batch4-ep10 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 3.2895 - Epoch: 9 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': 2e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: float32 ### Training results | Train Loss | Epoch | |:----------:|:-----:| | 4.1298 | 0 | | 3.5157 | 1 | | 3.4732 | 2 | | 3.4565 | 3 | | 3.4444 | 4 | | 3.4349 | 5 | | 3.4197 | 6 | | 3.4109 | 7 | | 3.3493 | 8 | | 3.2895 | 9 | ### Framework versions - Transformers 4.20.1 - TensorFlow 2.8.2 - Datasets 2.3.2 - Tokenizers 0.12.1
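The optimizer settings listed above map onto the `AdamWeightDecay` class from the transformers TensorFlow utilities; a sketch of recreating it (assuming TensorFlow and a matching transformers version are installed):

```python
# Recreate the optimizer configuration listed above (a sketch; requires TensorFlow).
from transformers import AdamWeightDecay

optimizer = AdamWeightDecay(
    learning_rate=2e-05,
    weight_decay_rate=0.01,
    beta_1=0.9,
    beta_2=0.999,
    epsilon=1e-07,
)
```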
justinwilloughby/ppo-LunarLander-v2
justinwilloughby
2022-07-06T15:31:07Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-07-05T20:44:15Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 256.08 +/- 27.96 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
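Until the TODO above is filled in, here is a hedged loading sketch with `huggingface_sb3` (the checkpoint filename inside the repo is an assumption — check the repository's file list):

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Filename is an assumption; adjust it to the actual file in this repository.
checkpoint = load_from_hub(
    repo_id="justinwilloughby/ppo-LunarLander-v2",
    filename="ppo-LunarLander-v2.zip",
)
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```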
cacauvicosa/heart1ohr2x9e-target-classification
cacauvicosa
2022-07-06T15:11:05Z
0
0
sklearn
[ "sklearn", "tabular-classification", "baseline-trainer", "license:apache-2.0", "region:us" ]
tabular-classification
2022-07-06T15:11:03Z
--- license: apache-2.0 library_name: sklearn tags: - tabular-classification - baseline-trainer --- ## Baseline Model trained on heart1ohr2x9e to apply classification on target **Metrics of the best model:** accuracy 0.885854 average_precision 0.949471 roc_auc 0.050633 recall_macro 0.885324 f1_macro 0.885610 Name: LogisticRegression(class_weight='balanced', max_iter=1000), dtype: float64 **See model plot below:** <style>#sk-container-id-8 {color: black;background-color: white;}#sk-container-id-8 pre{padding: 0;}#sk-container-id-8 div.sk-toggleable {background-color: white;}#sk-container-id-8 label.sk-toggleable__label {cursor: pointer;display: block;width: 100%;margin-bottom: 0;padding: 0.3em;box-sizing: border-box;text-align: center;}#sk-container-id-8 label.sk-toggleable__label-arrow:before {content: "▸";float: left;margin-right: 0.25em;color: #696969;}#sk-container-id-8 label.sk-toggleable__label-arrow:hover:before {color: black;}#sk-container-id-8 div.sk-estimator:hover label.sk-toggleable__label-arrow:before {color: black;}#sk-container-id-8 div.sk-toggleable__content {max-height: 0;max-width: 0;overflow: hidden;text-align: left;background-color: #f0f8ff;}#sk-container-id-8 div.sk-toggleable__content pre {margin: 0.2em;color: black;border-radius: 0.25em;background-color: #f0f8ff;}#sk-container-id-8 input.sk-toggleable__control:checked~div.sk-toggleable__content {max-height: 200px;max-width: 100%;overflow: auto;}#sk-container-id-8 input.sk-toggleable__control:checked~label.sk-toggleable__label-arrow:before {content: "▾";}#sk-container-id-8 div.sk-estimator input.sk-toggleable__control:checked~label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-8 div.sk-label input.sk-toggleable__control:checked~label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-8 input.sk-hidden--visually {border: 0;clip: rect(1px 1px 1px 1px);clip: rect(1px, 1px, 1px, 1px);height: 1px;margin: -1px;overflow: hidden;padding: 0;position: absolute;width: 1px;}#sk-container-id-8 div.sk-estimator {font-family: monospace;background-color: #f0f8ff;border: 1px dotted black;border-radius: 0.25em;box-sizing: border-box;margin-bottom: 0.5em;}#sk-container-id-8 div.sk-estimator:hover {background-color: #d4ebff;}#sk-container-id-8 div.sk-parallel-item::after {content: "";width: 100%;border-bottom: 1px solid gray;flex-grow: 1;}#sk-container-id-8 div.sk-label:hover label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-8 div.sk-serial::before {content: "";position: absolute;border-left: 1px solid gray;box-sizing: border-box;top: 0;bottom: 0;left: 50%;z-index: 0;}#sk-container-id-8 div.sk-serial {display: flex;flex-direction: column;align-items: center;background-color: white;padding-right: 0.2em;padding-left: 0.2em;position: relative;}#sk-container-id-8 div.sk-item {position: relative;z-index: 1;}#sk-container-id-8 div.sk-parallel {display: flex;align-items: stretch;justify-content: center;background-color: white;position: relative;}#sk-container-id-8 div.sk-item::before, #sk-container-id-8 div.sk-parallel-item::before {content: "";position: absolute;border-left: 1px solid gray;box-sizing: border-box;top: 0;bottom: 0;left: 50%;z-index: -1;}#sk-container-id-8 div.sk-parallel-item {display: flex;flex-direction: column;z-index: 1;position: relative;background-color: white;}#sk-container-id-8 div.sk-parallel-item:first-child::after {align-self: flex-end;width: 50%;}#sk-container-id-8 div.sk-parallel-item:last-child::after {align-self: flex-start;width: 50%;}#sk-container-id-8 
div.sk-parallel-item:only-child::after {width: 0;}#sk-container-id-8 div.sk-dashed-wrapped {border: 1px dashed gray;margin: 0 0.4em 0.5em 0.4em;box-sizing: border-box;padding-bottom: 0.4em;background-color: white;}#sk-container-id-8 div.sk-label label {font-family: monospace;font-weight: bold;display: inline-block;line-height: 1.2em;}#sk-container-id-8 div.sk-label-container {text-align: center;}#sk-container-id-8 div.sk-container {/* jupyter's `normalize.less` sets `[hidden] { display: none; }` but bootstrap.min.css set `[hidden] { display: none !important; }` so we also need the `!important` here to be able to override the default hidden behavior on the sphinx rendered scikit-learn.org. See: https://github.com/scikit-learn/scikit-learn/issues/21755 */display: inline-block !important;position: relative;}#sk-container-id-8 div.sk-text-repr-fallback {display: none;}</style><div id="sk-container-id-8" class="sk-top-container"><div class="sk-text-repr-fallback"><pre>Pipeline(steps=[(&#x27;easypreprocessor&#x27;,EasyPreprocessor(types= continuous dirty_float low_card_int ... date free_string useless age False False False ... False False False sex False False False ... False False False cp False False False ... False False False trestbps True False False ... False False False chol True False False ... False False False fbs False False False ... False False False restecg False Fa...... False False False thalach True False False ... False False False exang False False False ... False False False oldpeak True False False ... False False False slope False False False ... False False False ca False False False ... False False False thal False False False ... False False False[13 rows x 7 columns])),(&#x27;logisticregression&#x27;,LogisticRegression(C=1, class_weight=&#x27;balanced&#x27;,max_iter=1000))])</pre><b>In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook. <br />On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.</b></div><div class="sk-container" hidden><div class="sk-item sk-dashed-wrapped"><div class="sk-label-container"><div class="sk-label sk-toggleable"><input class="sk-toggleable__control sk-hidden--visually" id="sk-estimator-id-24" type="checkbox" ><label for="sk-estimator-id-24" class="sk-toggleable__label sk-toggleable__label-arrow">Pipeline</label><div class="sk-toggleable__content"><pre>Pipeline(steps=[(&#x27;easypreprocessor&#x27;,EasyPreprocessor(types= continuous dirty_float low_card_int ... date free_string useless age False False False ... False False False sex False False False ... False False False cp False False False ... False False False trestbps True False False ... False False False chol True False False ... False False False fbs False False False ... False False False restecg False Fa...... False False False thalach True False False ... False False False exang False False False ... False False False oldpeak True False False ... False False False slope False False False ... False False False ca False False False ... False False False thal False False False ... 
False False False[13 rows x 7 columns])),(&#x27;logisticregression&#x27;,LogisticRegression(C=1, class_weight=&#x27;balanced&#x27;,max_iter=1000))])</pre></div></div></div><div class="sk-serial"><div class="sk-item"><div class="sk-estimator sk-toggleable"><input class="sk-toggleable__control sk-hidden--visually" id="sk-estimator-id-25" type="checkbox" ><label for="sk-estimator-id-25" class="sk-toggleable__label sk-toggleable__label-arrow">EasyPreprocessor</label><div class="sk-toggleable__content"><pre>EasyPreprocessor(types= continuous dirty_float low_card_int ... date free_string useless age False False False ... False False False sex False False False ... False False False cp False False False ... False False False trestbps True False False ... False False False chol True False False ... False False False fbs False False False ... False False False restecg False False False ... False False False thalach True False False ... False False False exang False False False ... False False False oldpeak True False False ... False False False slope False False False ... False False False ca False False False ... False False False thal False False False ... False False False[13 rows x 7 columns])</pre></div></div></div><div class="sk-item"><div class="sk-estimator sk-toggleable"><input class="sk-toggleable__control sk-hidden--visually" id="sk-estimator-id-26" type="checkbox" ><label for="sk-estimator-id-26" class="sk-toggleable__label sk-toggleable__label-arrow">LogisticRegression</label><div class="sk-toggleable__content"><pre>LogisticRegression(C=1, class_weight=&#x27;balanced&#x27;, max_iter=1000)</pre></div></div></div></div></div></div></div> **Disclaimer:** This model is trained with dabl library as a baseline, for better results, use [AutoTrain](https://huggingface.co/autotrain). **Logs of training** including the models tried in the process can be found in logs.txt
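To reuse the baseline outside the training logs, a hedged loading sketch (the pickle filename is an assumption — check the repository's file list; the pipeline expects a pandas DataFrame with the original heart-dataset columns):

```python
import pickle

from huggingface_hub import hf_hub_download

# Filename is an assumption; adjust it to the actual artifact in this repository.
path = hf_hub_download(
    repo_id="cacauvicosa/heart1ohr2x9e-target-classification",
    filename="model.pkl",
)
with open(path, "rb") as f:
    clf = pickle.load(f)

# X should be a DataFrame with the columns shown in the pipeline above
# (age, sex, cp, trestbps, chol, fbs, restecg, thalach, exang, oldpeak, slope, ca, thal).
# predictions = clf.predict(X)
```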
bothrajat/q-FrozenLake-v1-8x8-noSlippery
bothrajat
2022-07-06T14:50:19Z
0
0
null
[ "FrozenLake-v1-8x8-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-07-06T14:50:11Z
--- tags: - FrozenLake-v1-8x8-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-8x8-noSlippery results: - metrics: - type: mean_reward value: 0.00 +/- 0.00 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-8x8-no_slippery type: FrozenLake-v1-8x8-no_slippery --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1** . ## Usage ```python model = load_from_hub(repo_id="bothrajat/q-FrozenLake-v1-8x8-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
bothrajat/q-FrozenLake-v1-4x4-noSlippery
bothrajat
2022-07-06T14:34:51Z
0
0
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-07-04T12:45:20Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1** . ## Usage ```python model = load_from_hub(repo_id="bothrajat/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
huggingtweets/frnsw-nswrfs-nswses
huggingtweets
2022-07-06T14:32:52Z
3
0
transformers
[ "transformers", "pytorch", "gpt2", "text-generation", "huggingtweets", "en", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2022-07-06T14:32:45Z
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1150678663265832960/ujqrCyuu_400x400.png&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/895892720194957313/RVLTWlDI_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1500778204294180868/3B6rKocs_400x400.jpg&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI CYBORG 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">NSW RFS & NSW SES & Fire and Rescue NSW</div> <div style="text-align: center; font-size: 14px;">@frnsw-nswrfs-nswses</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from NSW RFS & NSW SES & Fire and Rescue NSW. | Data | NSW RFS | NSW SES | Fire and Rescue NSW | | --- | --- | --- | --- | | Tweets downloaded | 3250 | 3248 | 3249 | | Retweets | 275 | 2093 | 875 | | Short tweets | 12 | 12 | 48 | | Tweets kept | 2963 | 1143 | 2326 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/1cxt6027/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @frnsw-nswrfs-nswses's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/tjbhow2z) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/tjbhow2z/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/frnsw-nswrfs-nswses') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. 
[![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
TestZee/t5-small-finetuned-custom-wion-test
TestZee
2022-07-06T13:28:44Z
4
0
transformers
[ "transformers", "tf", "t5", "text2text-generation", "generated_from_keras_callback", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text2text-generation
2022-07-06T13:23:31Z
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: TestZee/t5-small-finetuned-custom-wion-test results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # TestZee/t5-small-finetuned-custom-wion-test This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 1.9773 - Validation Loss: 0.8028 - Epoch: 9 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': 2e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 2.2933 | 0.9052 | 0 | | 2.3077 | 0.8923 | 1 | | 2.1972 | 0.8797 | 2 | | 2.1740 | 0.8677 | 3 | | 2.1535 | 0.8564 | 4 | | 2.1772 | 0.8452 | 5 | | 2.1227 | 0.8342 | 6 | | 2.0875 | 0.8234 | 7 | | 2.0279 | 0.8129 | 8 | | 1.9773 | 0.8028 | 9 | ### Framework versions - Transformers 4.20.1 - TensorFlow 2.8.2 - Tokenizers 0.12.1
luizapzbn/titanicht_mp88q-Survived-classification
luizapzbn
2022-07-06T13:25:48Z
0
0
sklearn
[ "sklearn", "tabular-classification", "baseline-trainer", "license:apache-2.0", "region:us" ]
tabular-classification
2022-07-06T13:25:46Z
--- license: apache-2.0 library_name: sklearn tags: - tabular-classification - baseline-trainer --- ## Baseline Model trained on titanicht_mp88q to apply classification on Survived **Metrics of the best model:** accuracy 0.803597 average_precision 0.801332 roc_auc 0.848079 recall_macro 0.795883 f1_macro 0.793746 Name: DecisionTreeClassifier(class_weight='balanced', max_depth=5), dtype: float64 **See model plot below:** <style>#sk-container-id-7 {color: black;background-color: white;}#sk-container-id-7 pre{padding: 0;}#sk-container-id-7 div.sk-toggleable {background-color: white;}#sk-container-id-7 label.sk-toggleable__label {cursor: pointer;display: block;width: 100%;margin-bottom: 0;padding: 0.3em;box-sizing: border-box;text-align: center;}#sk-container-id-7 label.sk-toggleable__label-arrow:before {content: "▸";float: left;margin-right: 0.25em;color: #696969;}#sk-container-id-7 label.sk-toggleable__label-arrow:hover:before {color: black;}#sk-container-id-7 div.sk-estimator:hover label.sk-toggleable__label-arrow:before {color: black;}#sk-container-id-7 div.sk-toggleable__content {max-height: 0;max-width: 0;overflow: hidden;text-align: left;background-color: #f0f8ff;}#sk-container-id-7 div.sk-toggleable__content pre {margin: 0.2em;color: black;border-radius: 0.25em;background-color: #f0f8ff;}#sk-container-id-7 input.sk-toggleable__control:checked~div.sk-toggleable__content {max-height: 200px;max-width: 100%;overflow: auto;}#sk-container-id-7 input.sk-toggleable__control:checked~label.sk-toggleable__label-arrow:before {content: "▾";}#sk-container-id-7 div.sk-estimator input.sk-toggleable__control:checked~label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-7 div.sk-label input.sk-toggleable__control:checked~label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-7 input.sk-hidden--visually {border: 0;clip: rect(1px 1px 1px 1px);clip: rect(1px, 1px, 1px, 1px);height: 1px;margin: -1px;overflow: hidden;padding: 0;position: absolute;width: 1px;}#sk-container-id-7 div.sk-estimator {font-family: monospace;background-color: #f0f8ff;border: 1px dotted black;border-radius: 0.25em;box-sizing: border-box;margin-bottom: 0.5em;}#sk-container-id-7 div.sk-estimator:hover {background-color: #d4ebff;}#sk-container-id-7 div.sk-parallel-item::after {content: "";width: 100%;border-bottom: 1px solid gray;flex-grow: 1;}#sk-container-id-7 div.sk-label:hover label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-7 div.sk-serial::before {content: "";position: absolute;border-left: 1px solid gray;box-sizing: border-box;top: 0;bottom: 0;left: 50%;z-index: 0;}#sk-container-id-7 div.sk-serial {display: flex;flex-direction: column;align-items: center;background-color: white;padding-right: 0.2em;padding-left: 0.2em;position: relative;}#sk-container-id-7 div.sk-item {position: relative;z-index: 1;}#sk-container-id-7 div.sk-parallel {display: flex;align-items: stretch;justify-content: center;background-color: white;position: relative;}#sk-container-id-7 div.sk-item::before, #sk-container-id-7 div.sk-parallel-item::before {content: "";position: absolute;border-left: 1px solid gray;box-sizing: border-box;top: 0;bottom: 0;left: 50%;z-index: -1;}#sk-container-id-7 div.sk-parallel-item {display: flex;flex-direction: column;z-index: 1;position: relative;background-color: white;}#sk-container-id-7 div.sk-parallel-item:first-child::after {align-self: flex-end;width: 50%;}#sk-container-id-7 div.sk-parallel-item:last-child::after {align-self: flex-start;width: 
50%;}#sk-container-id-7 div.sk-parallel-item:only-child::after {width: 0;}#sk-container-id-7 div.sk-dashed-wrapped {border: 1px dashed gray;margin: 0 0.4em 0.5em 0.4em;box-sizing: border-box;padding-bottom: 0.4em;background-color: white;}#sk-container-id-7 div.sk-label label {font-family: monospace;font-weight: bold;display: inline-block;line-height: 1.2em;}#sk-container-id-7 div.sk-label-container {text-align: center;}#sk-container-id-7 div.sk-container {/* jupyter's `normalize.less` sets `[hidden] { display: none; }` but bootstrap.min.css set `[hidden] { display: none !important; }` so we also need the `!important` here to be able to override the default hidden behavior on the sphinx rendered scikit-learn.org. See: https://github.com/scikit-learn/scikit-learn/issues/21755 */display: inline-block !important;position: relative;}#sk-container-id-7 div.sk-text-repr-fallback {display: none;}</style><div id="sk-container-id-7" class="sk-top-container"><div class="sk-text-repr-fallback"><pre>Pipeline(steps=[(&#x27;easypreprocessor&#x27;,EasyPreprocessor(types= continuous dirty_float low_card_int ... date free_string useless Pclass False False False ... False False False Name False False False ... False True False Sex False False False ... False False False Age True False False ... False False False SibSp False False False ... False False False Parch False False False ... False False False Ticket False False False ... False True False Fare True False False ... False False False Cabin False False False ... False True False Embarked False False False ... False False False[10 rows x 7 columns])),(&#x27;decisiontreeclassifier&#x27;,DecisionTreeClassifier(class_weight=&#x27;balanced&#x27;, max_depth=5))])</pre><b>In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook. <br />On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.</b></div><div class="sk-container" hidden><div class="sk-item sk-dashed-wrapped"><div class="sk-label-container"><div class="sk-label sk-toggleable"><input class="sk-toggleable__control sk-hidden--visually" id="sk-estimator-id-21" type="checkbox" ><label for="sk-estimator-id-21" class="sk-toggleable__label sk-toggleable__label-arrow">Pipeline</label><div class="sk-toggleable__content"><pre>Pipeline(steps=[(&#x27;easypreprocessor&#x27;,EasyPreprocessor(types= continuous dirty_float low_card_int ... date free_string useless Pclass False False False ... False False False Name False False False ... False True False Sex False False False ... False False False Age True False False ... False False False SibSp False False False ... False False False Parch False False False ... False False False Ticket False False False ... False True False Fare True False False ... False False False Cabin False False False ... False True False Embarked False False False ... False False False[10 rows x 7 columns])),(&#x27;decisiontreeclassifier&#x27;,DecisionTreeClassifier(class_weight=&#x27;balanced&#x27;, max_depth=5))])</pre></div></div></div><div class="sk-serial"><div class="sk-item"><div class="sk-estimator sk-toggleable"><input class="sk-toggleable__control sk-hidden--visually" id="sk-estimator-id-22" type="checkbox" ><label for="sk-estimator-id-22" class="sk-toggleable__label sk-toggleable__label-arrow">EasyPreprocessor</label><div class="sk-toggleable__content"><pre>EasyPreprocessor(types= continuous dirty_float low_card_int ... date free_string useless Pclass False False False ... 
False False False Name False False False ... False True False Sex False False False ... False False False Age True False False ... False False False SibSp False False False ... False False False Parch False False False ... False False False Ticket False False False ... False True False Fare True False False ... False False False Cabin False False False ... False True False Embarked False False False ... False False False[10 rows x 7 columns])</pre></div></div></div><div class="sk-item"><div class="sk-estimator sk-toggleable"><input class="sk-toggleable__control sk-hidden--visually" id="sk-estimator-id-23" type="checkbox" ><label for="sk-estimator-id-23" class="sk-toggleable__label sk-toggleable__label-arrow">DecisionTreeClassifier</label><div class="sk-toggleable__content"><pre>DecisionTreeClassifier(class_weight=&#x27;balanced&#x27;, max_depth=5)</pre></div></div></div></div></div></div></div> **Disclaimer:** This model is trained with dabl library as a baseline, for better results, use [AutoTrain](https://huggingface.co/autotrain). **Logs of training** including the models tried in the process can be found in logs.txt
srg/outhimar_64-Close-regression
srg
2022-07-06T12:33:04Z
0
4
sklearn
[ "sklearn", "tabular-regression", "baseline-trainer", "license:apache-2.0", "region:us" ]
tabular-regression
2022-07-06T12:33:02Z
--- license: apache-2.0 library_name: sklearn tags: - tabular-regression - baseline-trainer --- ## Baseline Model trained on outhimar_64 to apply regression on Close **Metrics of the best model:** r2 0.999858 neg_mean_squared_error -1.067685 Name: Ridge(alpha=10), dtype: float64 **See model plot below:** <style>#sk-container-id-6 {color: black;background-color: white;}#sk-container-id-6 pre{padding: 0;}#sk-container-id-6 div.sk-toggleable {background-color: white;}#sk-container-id-6 label.sk-toggleable__label {cursor: pointer;display: block;width: 100%;margin-bottom: 0;padding: 0.3em;box-sizing: border-box;text-align: center;}#sk-container-id-6 label.sk-toggleable__label-arrow:before {content: "▸";float: left;margin-right: 0.25em;color: #696969;}#sk-container-id-6 label.sk-toggleable__label-arrow:hover:before {color: black;}#sk-container-id-6 div.sk-estimator:hover label.sk-toggleable__label-arrow:before {color: black;}#sk-container-id-6 div.sk-toggleable__content {max-height: 0;max-width: 0;overflow: hidden;text-align: left;background-color: #f0f8ff;}#sk-container-id-6 div.sk-toggleable__content pre {margin: 0.2em;color: black;border-radius: 0.25em;background-color: #f0f8ff;}#sk-container-id-6 input.sk-toggleable__control:checked~div.sk-toggleable__content {max-height: 200px;max-width: 100%;overflow: auto;}#sk-container-id-6 input.sk-toggleable__control:checked~label.sk-toggleable__label-arrow:before {content: "▾";}#sk-container-id-6 div.sk-estimator input.sk-toggleable__control:checked~label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-6 div.sk-label input.sk-toggleable__control:checked~label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-6 input.sk-hidden--visually {border: 0;clip: rect(1px 1px 1px 1px);clip: rect(1px, 1px, 1px, 1px);height: 1px;margin: -1px;overflow: hidden;padding: 0;position: absolute;width: 1px;}#sk-container-id-6 div.sk-estimator {font-family: monospace;background-color: #f0f8ff;border: 1px dotted black;border-radius: 0.25em;box-sizing: border-box;margin-bottom: 0.5em;}#sk-container-id-6 div.sk-estimator:hover {background-color: #d4ebff;}#sk-container-id-6 div.sk-parallel-item::after {content: "";width: 100%;border-bottom: 1px solid gray;flex-grow: 1;}#sk-container-id-6 div.sk-label:hover label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-6 div.sk-serial::before {content: "";position: absolute;border-left: 1px solid gray;box-sizing: border-box;top: 0;bottom: 0;left: 50%;z-index: 0;}#sk-container-id-6 div.sk-serial {display: flex;flex-direction: column;align-items: center;background-color: white;padding-right: 0.2em;padding-left: 0.2em;position: relative;}#sk-container-id-6 div.sk-item {position: relative;z-index: 1;}#sk-container-id-6 div.sk-parallel {display: flex;align-items: stretch;justify-content: center;background-color: white;position: relative;}#sk-container-id-6 div.sk-item::before, #sk-container-id-6 div.sk-parallel-item::before {content: "";position: absolute;border-left: 1px solid gray;box-sizing: border-box;top: 0;bottom: 0;left: 50%;z-index: -1;}#sk-container-id-6 div.sk-parallel-item {display: flex;flex-direction: column;z-index: 1;position: relative;background-color: white;}#sk-container-id-6 div.sk-parallel-item:first-child::after {align-self: flex-end;width: 50%;}#sk-container-id-6 div.sk-parallel-item:last-child::after {align-self: flex-start;width: 50%;}#sk-container-id-6 div.sk-parallel-item:only-child::after {width: 0;}#sk-container-id-6 div.sk-dashed-wrapped {border: 1px dashed 
gray;margin: 0 0.4em 0.5em 0.4em;box-sizing: border-box;padding-bottom: 0.4em;background-color: white;}#sk-container-id-6 div.sk-label label {font-family: monospace;font-weight: bold;display: inline-block;line-height: 1.2em;}#sk-container-id-6 div.sk-label-container {text-align: center;}#sk-container-id-6 div.sk-container {/* jupyter's `normalize.less` sets `[hidden] { display: none; }` but bootstrap.min.css set `[hidden] { display: none !important; }` so we also need the `!important` here to be able to override the default hidden behavior on the sphinx rendered scikit-learn.org. See: https://github.com/scikit-learn/scikit-learn/issues/21755 */display: inline-block !important;position: relative;}#sk-container-id-6 div.sk-text-repr-fallback {display: none;}</style><div id="sk-container-id-6" class="sk-top-container"><div class="sk-text-repr-fallback"><pre>Pipeline(steps=[(&#x27;easypreprocessor&#x27;,EasyPreprocessor(types= continuous dirty_float low_card_int ... date free_string useless Date False False False ... True False False Open True False False ... False False False High True False False ... False False False Low True False False ... False False False Adj Close True False False ... False False False Volume True False False ... False False False[6 rows x 7 columns])),(&#x27;ridge&#x27;, Ridge(alpha=10))])</pre><b>In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook. <br />On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.</b></div><div class="sk-container" hidden><div class="sk-item sk-dashed-wrapped"><div class="sk-label-container"><div class="sk-label sk-toggleable"><input class="sk-toggleable__control sk-hidden--visually" id="sk-estimator-id-18" type="checkbox" ><label for="sk-estimator-id-18" class="sk-toggleable__label sk-toggleable__label-arrow">Pipeline</label><div class="sk-toggleable__content"><pre>Pipeline(steps=[(&#x27;easypreprocessor&#x27;,EasyPreprocessor(types= continuous dirty_float low_card_int ... date free_string useless Date False False False ... True False False Open True False False ... False False False High True False False ... False False False Low True False False ... False False False Adj Close True False False ... False False False Volume True False False ... False False False[6 rows x 7 columns])),(&#x27;ridge&#x27;, Ridge(alpha=10))])</pre></div></div></div><div class="sk-serial"><div class="sk-item"><div class="sk-estimator sk-toggleable"><input class="sk-toggleable__control sk-hidden--visually" id="sk-estimator-id-19" type="checkbox" ><label for="sk-estimator-id-19" class="sk-toggleable__label sk-toggleable__label-arrow">EasyPreprocessor</label><div class="sk-toggleable__content"><pre>EasyPreprocessor(types= continuous dirty_float low_card_int ... date free_string useless Date False False False ... True False False Open True False False ... False False False High True False False ... False False False Low True False False ... False False False Adj Close True False False ... False False False Volume True False False ... 
False False False[6 rows x 7 columns])</pre></div></div></div><div class="sk-item"><div class="sk-estimator sk-toggleable"><input class="sk-toggleable__control sk-hidden--visually" id="sk-estimator-id-20" type="checkbox" ><label for="sk-estimator-id-20" class="sk-toggleable__label sk-toggleable__label-arrow">Ridge</label><div class="sk-toggleable__content"><pre>Ridge(alpha=10)</pre></div></div></div></div></div></div></div> **Disclaimer:** This model is trained with dabl library as a baseline, for better results, use [AutoTrain](https://huggingface.co/autotrain). **Logs of training** including the models tried in the process can be found in logs.txt
sumitrsch/Indic-bert_multiconer22_bn
sumitrsch
2022-07-06T12:32:40Z
3
2
transformers
[ "transformers", "pytorch", "albert", "token-classification", "license:afl-3.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2022-07-06T10:07:47Z
---
license: afl-3.0
---

Put this model path in the variable `best_model_path` in the first cell of the Colab notebook below to test the SemEval MultiCoNER task for the Bangla track.

https://colab.research.google.com/drive/1P9827acdS7i6eZTi4B0cOms5qLREqvUO
sumitrsch/xlm_R_large_multiconer22_hi
sumitrsch
2022-07-06T12:26:37Z
3
2
transformers
[ "transformers", "pytorch", "xlm-roberta", "token-classification", "license:afl-3.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2022-07-06T09:04:53Z
---
license: afl-3.0
---

Put this model path in the variable `best_model_path` in the first cell of the Colab notebook below to test the SemEval MultiCoNER task.

https://colab.research.google.com/drive/17WyqwdoRNnzImeik6wTRE5uuj9QQnkXA#scrollTo=nYtUtmyDFAqP
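Outside the notebook, a minimal sketch for loading the checkpoint directly with transformers (this assumes the repository ships its tokenizer files and label mapping; the Hindi example sentence is illustrative only):

```python
from transformers import AutoTokenizer, AutoModelForTokenClassification, pipeline

model_id = "sumitrsch/xlm_R_large_multiconer22_hi"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForTokenClassification.from_pretrained(model_id)

ner = pipeline(
    "token-classification",
    model=model,
    tokenizer=tokenizer,
    aggregation_strategy="simple",  # merge sub-word pieces into full words
)

print(ner("विराट कोहली भारत के लिए क्रिकेट खेलते हैं"))
```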
sumitrsch/mbert_multiconer22_hi
sumitrsch
2022-07-06T12:25:50Z
3
1
transformers
[ "transformers", "pytorch", "bert", "token-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2022-07-06T10:49:17Z
Put this model path in the variable `best_model_path` in the first cell of the Colab notebook below to test the SemEval MultiCoNER task.

https://colab.research.google.com/drive/17WyqwdoRNnzImeik6wTRE5uuj9QQnkXA#scrollTo=nYtUtmyDFAqP
dandelin/vilt-b32-mlm
dandelin
2022-07-06T12:18:37Z
66,336
11
transformers
[ "transformers", "pytorch", "vilt", "fill-mask", "arxiv:2102.03334", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
2022-03-02T23:29:05Z
---
license: apache-2.0
---

# Vision-and-Language Transformer (ViLT), pre-trained only

Vision-and-Language Transformer (ViLT) model pre-trained on GCC+SBU+COCO+VG (200k steps). It was introduced in the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Kim et al. and first released in [this repository](https://github.com/dandelin/ViLT).

Note: this model only includes the language modeling head.

Disclaimer: The team releasing ViLT did not write a model card for this model so this model card has been written by the Hugging Face team.

## Intended uses & limitations

You can use the raw model for masked language modeling given an image and a piece of text with [MASK] tokens.

### How to use

Here is how to use this model in PyTorch:

```
from transformers import ViltProcessor, ViltForMaskedLM
import requests
from PIL import Image
import re
import torch

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
text = "a bunch of [MASK] laying on a [MASK]."

device = "cuda" if torch.cuda.is_available() else "cpu"

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-mlm")
model = ViltForMaskedLM.from_pretrained("dandelin/vilt-b32-mlm").to(device)

# prepare inputs
encoding = processor(image, text, return_tensors="pt").to(device)
pixel_values = encoding.pixel_values

# forward pass
outputs = model(**encoding)

tl = len(re.findall(r"\[MASK\]", text))
inferred_token = [text]

# gradually fill in the MASK tokens, one by one
with torch.no_grad():
    for i in range(tl):
        encoded = processor.tokenizer(inferred_token)
        input_ids = torch.tensor(encoded.input_ids).to(device)
        encoded = encoded["input_ids"][0][1:-1]
        outputs = model(input_ids=input_ids, pixel_values=pixel_values)
        mlm_logits = outputs.logits[0]  # shape (seq_len, vocab_size)
        # only take into account text features (minus CLS and SEP token)
        mlm_logits = mlm_logits[1 : input_ids.shape[1] - 1, :]
        mlm_values, mlm_ids = mlm_logits.softmax(dim=-1).max(dim=-1)
        # only keep predictions at [MASK] positions (103 is the [MASK] token id
        # of the BERT tokenizer used by ViLT)
        mlm_values[torch.tensor(encoded, device=device) != 103] = 0
        select = mlm_values.argmax().item()
        encoded[select] = mlm_ids[select].item()
        inferred_token = [processor.decode(encoded)]

encoded = processor.tokenizer(inferred_token)
print(processor.decode(encoded.input_ids[0], skip_special_tokens=True))
```

## Training data

(to do)

## Training procedure

### Preprocessing

(to do)

### Pretraining

(to do)

## Evaluation results

(to do)

### BibTeX entry and citation info

```bibtex
@misc{kim2021vilt,
      title={ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision},
      author={Wonjae Kim and Bokyung Son and Ildoo Kim},
      year={2021},
      eprint={2102.03334},
      archivePrefix={arXiv},
      primaryClass={stat.ML}
}
```
SiddharthaM/beit-base-patch16-224-pt22k-ft22k-rim_one-new
SiddharthaM
2022-07-06T11:17:32Z
55
0
transformers
[ "transformers", "pytorch", "tensorboard", "beit", "image-classification", "generated_from_trainer", "dataset:imagefolder", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
2022-07-06T10:31:21Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder metrics: - accuracy model-index: - name: beit-base-patch16-224-pt22k-ft22k-rim_one-new results: - task: type: image-classification name: Image Classification dataset: type: rimonedl name: RIM ONE DL split: test metrics: - type: f1 value: 0.9197860962566845 name: F1 - task: type: image-classification name: Image Classification dataset: type: rim one name: RIMONEDL split: test metrics: - type: precision value: 0.9247311827956989 name: precision - type: recall value: 0.9148936170212766 name: Recall - type: accuracy value: 0.8972602739726028 name: Accuracy - type: roc_auc value: 0.8901391162029461 name: AUC --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # beit-base-patch16-224-pt22k-ft22k-rim_one-new This model is a fine-tuned version of [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.4550 - Accuracy: 0.8767 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 30 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 0.73 | 2 | 0.2411 | 0.9178 | | No log | 1.73 | 4 | 0.2182 | 0.8973 | | No log | 2.73 | 6 | 0.3085 | 0.8973 | | No log | 3.73 | 8 | 0.2794 | 0.8973 | | 0.1392 | 4.73 | 10 | 0.2398 | 0.9110 | | 0.1392 | 5.73 | 12 | 0.2925 | 0.8973 | | 0.1392 | 6.73 | 14 | 0.2798 | 0.9110 | | 0.1392 | 7.73 | 16 | 0.2184 | 0.9178 | | 0.1392 | 8.73 | 18 | 0.3007 | 0.9110 | | 0.0416 | 9.73 | 20 | 0.3344 | 0.9041 | | 0.0416 | 10.73 | 22 | 0.3626 | 0.9110 | | 0.0416 | 11.73 | 24 | 0.4842 | 0.8904 | | 0.0416 | 12.73 | 26 | 0.3664 | 0.8973 | | 0.0416 | 13.73 | 28 | 0.3458 | 0.9110 | | 0.0263 | 14.73 | 30 | 0.2810 | 0.9110 | | 0.0263 | 15.73 | 32 | 0.4695 | 0.8699 | | 0.0263 | 16.73 | 34 | 0.3723 | 0.9041 | | 0.0263 | 17.73 | 36 | 0.3447 | 0.9041 | | 0.0263 | 18.73 | 38 | 0.3708 | 0.8904 | | 0.0264 | 19.73 | 40 | 0.4052 | 0.9110 | | 0.0264 | 20.73 | 42 | 0.4492 | 0.9041 | | 0.0264 | 21.73 | 44 | 0.4649 | 0.8904 | | 0.0264 | 22.73 | 46 | 0.4061 | 0.9178 | | 0.0264 | 23.73 | 48 | 0.4136 | 0.9110 | | 0.0139 | 24.73 | 50 | 0.4183 | 0.8973 | | 0.0139 | 25.73 | 52 | 0.4504 | 0.8904 | | 0.0139 | 26.73 | 54 | 0.4368 | 0.8973 | | 0.0139 | 27.73 | 56 | 0.4711 | 0.9110 | | 0.0139 | 28.73 | 58 | 0.3928 | 0.9110 | | 0.005 | 29.73 | 60 | 0.4550 | 0.8767 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
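A minimal inference sketch for this checkpoint; the image path is a placeholder, and the predicted label names are whatever id-to-label mapping was saved in the fine-tuned config:

```python
from transformers import AutoFeatureExtractor, AutoModelForImageClassification
from PIL import Image
import torch

model_id = "SiddharthaM/beit-base-patch16-224-pt22k-ft22k-rim_one-new"
feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
model = AutoModelForImageClassification.from_pretrained(model_id)

image = Image.open("fundus_example.png")  # placeholder path to a RIM-ONE style fundus image
inputs = feature_extractor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits
predicted = logits.argmax(-1).item()
print(model.config.id2label[predicted])
```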
paola-md/recipe-test
paola-md
2022-07-06T10:32:13Z
3
0
transformers
[ "transformers", "pytorch", "tensorboard", "distilbert", "fill-mask", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
2022-07-06T10:27:36Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: recipe-test results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # recipe-test This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.9583 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 3.3675 | 1.0 | 16 | 3.0009 | | 3.0062 | 2.0 | 32 | 2.9583 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
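A quick way to query the fine-tuned masked-language model; the recipe-style sentence is made up for illustration and [MASK] is the DistilBERT mask token:

```python
from transformers import pipeline

fill = pipeline("fill-mask", model="paola-md/recipe-test")
for candidate in fill("Simmer the sauce until it starts to [MASK]."):
    print(candidate["token_str"], round(candidate["score"], 3))
```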
merve/breast_cancernb8gjv4n-diagnosis-classification
merve
2022-07-06T10:28:04Z
0
2
sklearn
[ "sklearn", "tabular-classification", "baseline-trainer", "license:apache-2.0", "region:us" ]
tabular-classification
2022-07-06T10:28:02Z
--- license: apache-2.0 library_name: sklearn tags: - tabular-classification - baseline-trainer --- ## Baseline Model trained on breast_cancernb8gjv4n to apply classification on diagnosis **Metrics of the best model:** accuracy 0.978932 average_precision 0.994309 roc_auc 0.995448 recall_macro 0.976607 f1_macro 0.977365 Name: LogisticRegression(C=0.1, class_weight='balanced', max_iter=1000), dtype: float64 **Model pipeline** (the card embeds an interactive HTML/CSS rendering of the estimator; only its structure is kept here): Pipeline(steps=[('easypreprocessor', EasyPreprocessor(31 input columns — id, radius_mean, texture_mean, perimeter_mean, ..., fractal_dimension_worst — all detected as continuous)), ('logisticregression', LogisticRegression(C=0.1, class_weight='balanced', max_iter=1000))]) **Disclaimer:** This model is trained with dabl library as a baseline, for better results, use [AutoTrain](https://huggingface.co/autotrain). **Logs of training** including the models tried in the process can be found in logs.txt
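A rough reconstruction of the baseline with plain scikit-learn — `load_breast_cancer` and `StandardScaler` are stand-ins for the uploaded breast_cancernb8gjv4n table and dabl's EasyPreprocessor, so scores will not match the card's metrics exactly:

```python
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

# Stand-in data: sklearn's built-in breast cancer table, not the breast_cancernb8gjv4n upload
X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0)

# StandardScaler stands in for dabl's EasyPreprocessor on all-continuous columns
clf = make_pipeline(StandardScaler(), LogisticRegression(C=0.1, class_weight="balanced", max_iter=1000))
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))
```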
kws/q-Taxi-v3
kws
2022-07-06T10:24:02Z
0
0
null
[ "Taxi-v3", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-07-06T10:23:57Z
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage
```python
import gym

# load_from_hub and evaluate_agent are the helper functions defined in the course notebook
model = load_from_hub(repo_id="kws/q-Taxi-v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"])
```
kws/q-FrozenLake-v1-4x4-noSlippery
kws
2022-07-06T09:59:39Z
0
0
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2022-07-06T09:59:33Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage
```python
import gym

# load_from_hub and evaluate_agent are the helper functions defined in the course notebook
model = load_from_hub(repo_id="kws/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"])
```
dnouri/brats_mri_segmentation
dnouri
2022-07-06T09:54:53Z
0
1
null
[ "monai", "arxiv:1810.11654", "region:us" ]
null
2022-07-06T09:13:12Z
--- tags: - monai --- # Model Overview A pre-trained model for volumetric (3D) segmentation of brain tumor subregions from multimodal MRIs based on BraTS 2018 data. The whole pipeline is modified from [clara_pt_brain_mri_segmentation](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/med/models/clara_pt_brain_mri_segmentation). ## Workflow The model is trained to segment 3 nested subregions of primary brain tumors (gliomas): the "enhancing tumor" (ET), the "tumor core" (TC), the "whole tumor" (WT) based on 4 aligned input MRI scans (T1c, T1, T2, FLAIR). - The ET is described by areas that show hyper intensity in T1c when compared to T1, but also when compared to "healthy" white matter in T1c. - The TC describes the bulk of the tumor, which is what is typically resected. The TC entails the ET, as well as the necrotic (fluid-filled) and the non-enhancing (solid) parts of the tumor. - The WT describes the complete extent of the disease, as it entails the TC and the peritumoral edema (ED), which is typically depicted by hyper-intense signal in FLAIR. ## Data The training data is from the [Multimodal Brain Tumor Segmentation Challenge (BraTS) 2018](https://www.med.upenn.edu/sbia/brats2018/data.html). - Target: 3 tumor subregions - Task: Segmentation - Modality: MRI - Size: 285 3D volumes (4 channels each) The provided labelled data was partitioned, based on our own split, into training (200 studies), validation (42 studies) and testing (43 studies) datasets. Please run `scripts/prepare_datalist.py` to produce the data list. The command is like: ``` python scripts/prepare_datalist.py --path your-brats18-dataset-path ``` ## Training configuration This model utilized a similar approach described in 3D MRI brain tumor segmentation using autoencoder regularization, which was a winning method in BraTS2018 [1]. The training was performed with the following: - GPU: At least 16GB of GPU memory. - Actual Model Input: 224 x 224 x 144 - AMP: True - Optimizer: Adam - Learning Rate: 1e-4 - Loss: DiceLoss ## Input Input: 4 channel MRI (4 aligned MRIs T1c, T1, T2, FLAIR at 1x1x1 mm) 1. Normalizing to unit std with zero mean 2. Randomly cropping to (224, 224, 144) 3. Randomly spatial flipping 4. Randomly scaling and shifting intensity of the volume ## Output Output: 3 channels - Label 0: TC tumor subregion - Label 1: WT tumor subregion - Label 2: ET tumor subregion ## Model Performance The achieved Dice scores on the validation data are: - Tumor core (TC): 0.8559 - Whole tumor (WT): 0.9026 - Enhancing tumor (ET): 0.7905 - Average: 0.8518 ## commands example Execute training: ``` python -m monai.bundle run training --meta_file configs/metadata.json --config_file configs/train.json --logging_file configs/logging.conf ``` Override the `train` config to execute multi-GPU training: ``` torchrun --standalone --nnodes=1 --nproc_per_node=8 -m monai.bundle run training --meta_file configs/metadata.json --config_file "['configs/train.json','configs/multi_gpu_train.json']" --logging_file configs/logging.conf ``` Override the `train` config to execute evaluation with the trained model: ``` python -m monai.bundle run evaluating --meta_file configs/metadata.json --config_file "['configs/train.json','configs/evaluate.json']" --logging_file configs/logging.conf ``` Execute inference: ``` python -m monai.bundle run evaluating --meta_file configs/metadata.json --config_file configs/inference.json --logging_file configs/logging.conf ``` # Disclaimer This is an example, not to be used for diagnostic purposes. 
# References [1] Myronenko, Andriy. "3D MRI brain tumor segmentation using autoencoder regularization." International MICCAI Brainlesion Workshop. Springer, Cham, 2018. https://arxiv.org/abs/1810.11654.
vinayak361/token_fine_tunned_flipkart
vinayak361
2022-07-06T09:32:50Z
3
0
transformers
[ "transformers", "pytorch", "tensorboard", "distilbert", "token-classification", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2022-07-06T07:42:01Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: token_fine_tunned_flipkart results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # token_fine_tunned_flipkart This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0992 - Precision: 0.9526 - Recall: 0.9669 - F1: 0.9597 - Accuracy: 0.9730 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 135 | 0.5967 | 0.7227 | 0.7830 | 0.7516 | 0.7932 | | No log | 2.0 | 270 | 0.3673 | 0.8105 | 0.8623 | 0.8356 | 0.8747 | | No log | 3.0 | 405 | 0.2679 | 0.8676 | 0.8854 | 0.8764 | 0.9094 | | 0.6219 | 4.0 | 540 | 0.1972 | 0.8955 | 0.9217 | 0.9084 | 0.9355 | | 0.6219 | 5.0 | 675 | 0.1500 | 0.9229 | 0.9374 | 0.9301 | 0.9525 | | 0.6219 | 6.0 | 810 | 0.1240 | 0.9341 | 0.9509 | 0.9424 | 0.9609 | | 0.6219 | 7.0 | 945 | 0.1041 | 0.9516 | 0.9650 | 0.9582 | 0.9720 | | 0.2085 | 8.0 | 1080 | 0.0992 | 0.9526 | 0.9669 | 0.9597 | 0.9730 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu102 - Datasets 2.2.2 - Tokenizers 0.12.1
laurian/pouet
laurian
2022-07-06T08:44:36Z
0
0
null
[ "region:us" ]
null
2022-07-06T08:42:05Z
valkiry robot desert technology
florentgbelidji/clip-text-feature-extraction
florentgbelidji
2022-07-06T08:28:24Z
6
0
transformers
[ "transformers", "pytorch", "feature-extraction", "sentence_embedding", "endpoints_compatible", "region:us" ]
feature-extraction
2022-07-05T09:42:40Z
--- tags: - feature-extraction - sentence_embedding ---
zuppif/maskformer-swin-small-ade
zuppif
2022-07-06T07:24:51Z
6
0
transformers
[ "transformers", "pytorch", "maskformer", "object-detection", "COCO", "YOLO", "Darknet", "model-index", "endpoints_compatible", "region:us" ]
object-detection
2022-03-11T15:01:28Z
--- tags: - object-detection - COCO - YOLO - Darknet model-index: - name: moon results: - metrics: - type: mAP value: 1 name: mAP task: type: object-detection name: object-detection dataset: name: COCO type: COCO ---
go2k/TEST2ppo-LunarLander-v2
go2k
2022-07-06T06:26:51Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-07-06T06:26:22Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 200.81 +/- 77.09 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
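One way to fill in the TODO above, assuming the checkpoint was pushed with the usual huggingface_sb3 layout — the `.zip` filename is a guess, so check the repo's file list before running:

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Filename is an assumption; adjust it to the actual archive name in the repo
checkpoint = load_from_hub(repo_id="go2k/TEST2ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```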
hsohn3/cchs-bert-visit-uncased-wordlevel-block512-batch4-ep100
hsohn3
2022-07-06T06:03:07Z
3
0
transformers
[ "transformers", "tf", "bert", "fill-mask", "generated_from_keras_callback", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
2022-07-05T19:36:06Z
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: hsohn3/cchs-bert-visit-uncased-wordlevel-block512-batch4-ep100 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # hsohn3/cchs-bert-visit-uncased-wordlevel-block512-batch4-ep100 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.7195 - Epoch: 99 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': 2e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: float32 ### Training results | Train Loss | Epoch | |:----------:|:-----:| | 3.8730 | 0 | | 3.0562 | 1 | | 3.0168 | 2 | | 3.0032 | 3 | | 2.9954 | 4 | | 2.9951 | 5 | | 2.9904 | 6 | | 2.9765 | 7 | | 2.9788 | 8 | | 2.9692 | 9 | | 2.9656 | 10 | | 2.9761 | 11 | | 2.9643 | 12 | | 2.9393 | 13 | | 2.9026 | 14 | | 2.8685 | 15 | | 2.8438 | 16 | | 2.8279 | 17 | | 2.8107 | 18 | | 2.7896 | 19 | | 2.7716 | 20 | | 2.7458 | 21 | | 2.7118 | 22 | | 2.6519 | 23 | | 2.5933 | 24 | | 2.4702 | 25 | | 2.2842 | 26 | | 2.0712 | 27 | | 1.8406 | 28 | | 1.6374 | 29 | | 1.4836 | 30 | | 1.3824 | 31 | | 1.3079 | 32 | | 1.2538 | 33 | | 1.2054 | 34 | | 1.1700 | 35 | | 1.1432 | 36 | | 1.1122 | 37 | | 1.0939 | 38 | | 1.0645 | 39 | | 1.0465 | 40 | | 1.0248 | 41 | | 1.0069 | 42 | | 0.9902 | 43 | | 0.9769 | 44 | | 0.9510 | 45 | | 0.9394 | 46 | | 0.9316 | 47 | | 0.9181 | 48 | | 0.9090 | 49 | | 0.9010 | 50 | | 0.8934 | 51 | | 0.8791 | 52 | | 0.8759 | 53 | | 0.8652 | 54 | | 0.8566 | 55 | | 0.8511 | 56 | | 0.8414 | 57 | | 0.8373 | 58 | | 0.8302 | 59 | | 0.8241 | 60 | | 0.8246 | 61 | | 0.8207 | 62 | | 0.8110 | 63 | | 0.8081 | 64 | | 0.8010 | 65 | | 0.7995 | 66 | | 0.7965 | 67 | | 0.7941 | 68 | | 0.7849 | 69 | | 0.7866 | 70 | | 0.7874 | 71 | | 0.7796 | 72 | | 0.7742 | 73 | | 0.7706 | 74 | | 0.7687 | 75 | | 0.7686 | 76 | | 0.7663 | 77 | | 0.7586 | 78 | | 0.7554 | 79 | | 0.7563 | 80 | | 0.7541 | 81 | | 0.7527 | 82 | | 0.7482 | 83 | | 0.7460 | 84 | | 0.7436 | 85 | | 0.7423 | 86 | | 0.7422 | 87 | | 0.7385 | 88 | | 0.7367 | 89 | | 0.7321 | 90 | | 0.7320 | 91 | | 0.7354 | 92 | | 0.7271 | 93 | | 0.7270 | 94 | | 0.7210 | 95 | | 0.7236 | 96 | | 0.7263 | 97 | | 0.7237 | 98 | | 0.7195 | 99 | ### Framework versions - Transformers 4.20.1 - TensorFlow 2.8.2 - Datasets 2.3.2 - Tokenizers 0.12.1
dee4hf/autotrain-deephate2-1093539673
dee4hf
2022-07-06T04:28:59Z
3
0
transformers
[ "transformers", "pytorch", "albert", "text-classification", "autotrain", "bn", "dataset:dee4hf/autotrain-data-deephate2", "co2_eq_emissions", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2022-07-06T04:25:25Z
--- tags: autotrain language: bn widget: - text: "I love AutoTrain 🤗" datasets: - dee4hf/autotrain-data-deephate2 co2_eq_emissions: 7.663051290039914 --- # Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 1093539673 - CO2 Emissions (in grams): 7.663051290039914 ## Validation Metrics - Loss: 0.34404119849205017 - Accuracy: 0.8843120070113936 - Macro F1: 0.8771237753798016 - Micro F1: 0.8843120070113936 - Weighted F1: 0.8843498914288083 - Macro Precision: 0.8745249813256932 - Micro Precision: 0.8843120070113936 - Weighted Precision: 0.8854719661321065 - Macro Recall: 0.8812563739901838 - Micro Recall: 0.8843120070113936 - Weighted Recall: 0.8843120070113936 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/dee4hf/autotrain-deephate2-1093539673 ``` Or Python API: ``` from transformers import AutoModelForSequenceClassification, AutoTokenizer model = AutoModelForSequenceClassification.from_pretrained("dee4hf/autotrain-deephate2-1093539673", use_auth_token=True) tokenizer = AutoTokenizer.from_pretrained("dee4hf/autotrain-deephate2-1093539673", use_auth_token=True) inputs = tokenizer("I love AutoTrain", return_tensors="pt") outputs = model(**inputs) ```
wiselinjayajos/t5-end2end-questions-generation-squadV2
wiselinjayajos
2022-07-06T02:27:13Z
6
0
transformers
[ "transformers", "pytorch", "t5", "text2text-generation", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text2text-generation
2022-06-23T20:32:03Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: t5-end2end-questions-generation-squadV2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-end2end-questions-generation-squadV2 This model is a fine-tuned version of [t5-base](https://huggingface.co/t5-base) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
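A hedged generation sketch for this checkpoint; the `generate questions:` prefix and `</s>`-terminated context follow the common convention for end-to-end question-generation fine-tunes of T5 and may need adjusting for this particular model:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "wiselinjayajos/t5-end2end-questions-generation-squadV2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# Prompt format is an assumption, not documented in the card
context = "generate questions: The Eiffel Tower was completed in 1889 and is located in Paris. </s>"
inputs = tokenizer(context, return_tensors="pt")
outputs = model.generate(**inputs, max_length=128, num_beams=4)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```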
domenicrosati/deberta-v3-xsmall-finetuned-review_classifier
domenicrosati
2022-07-06T01:09:25Z
5
0
transformers
[ "transformers", "pytorch", "tensorboard", "deberta-v2", "text-classification", "generated_from_trainer", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2022-07-05T20:16:35Z
--- license: mit tags: - text-classification - generated_from_trainer metrics: - accuracy - f1 model-index: - name: deberta-v3-xsmall-finetuned-review_classifier results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # deberta-v3-xsmall-finetuned-review_classifier This model is a fine-tuned version of [microsoft/deberta-v3-xsmall](https://huggingface.co/microsoft/deberta-v3-xsmall) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.1441 - Accuracy: 0.9513 - F1: 0.7458 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4.5e-05 - train_batch_size: 12 - eval_batch_size: 12 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 2 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:| | 0.1518 | 1.0 | 6667 | 0.1575 | 0.9510 | 0.7155 | | 0.1247 | 2.0 | 13334 | 0.1441 | 0.9513 | 0.7458 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
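A minimal sketch for scoring a review with the fine-tuned classifier; the returned label names come from the checkpoint's config, and their exact meaning is not documented in the card:

```python
from transformers import pipeline

clf = pipeline("text-classification", model="domenicrosati/deberta-v3-xsmall-finetuned-review_classifier")
print(clf("The methodology is sound, but the evaluation section needs more baselines."))
```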
emilys/twitter-roberta-base-dec2021-WNUT
emilys
2022-07-05T22:26:37Z
4
0
transformers
[ "transformers", "pytorch", "roberta", "token-classification", "generated_from_trainer", "dataset:wnut_17", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2022-07-05T22:21:52Z
--- tags: - generated_from_trainer datasets: - wnut_17 metrics: - precision - recall - f1 - accuracy model-index: - name: twitter-roberta-base-dec2021-WNUT results: - task: name: Token Classification type: token-classification dataset: name: wnut_17 type: wnut_17 args: wnut_17 metrics: - name: Precision type: precision value: 0.7111716621253406 - name: Recall type: recall value: 0.6244019138755981 - name: F1 type: f1 value: 0.664968152866242 - name: Accuracy type: accuracy value: 0.9642789042140724 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # twitter-roberta-base-dec2021-WNUT This model is a fine-tuned version of [cardiffnlp/twitter-roberta-base-dec2021](https://huggingface.co/cardiffnlp/twitter-roberta-base-dec2021) on the wnut_17 dataset. It achieves the following results on the evaluation set: - Loss: 0.2152 - Precision: 0.7112 - Recall: 0.6244 - F1: 0.6650 - Accuracy: 0.9643 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 64 - eval_batch_size: 1024 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 0.46 | 25 | 0.2818 | 0.0982 | 0.0383 | 0.0551 | 0.9241 | | No log | 0.93 | 50 | 0.2158 | 0.6181 | 0.4569 | 0.5254 | 0.9480 | | No log | 1.39 | 75 | 0.1930 | 0.6682 | 0.5347 | 0.5940 | 0.9555 | | No log | 1.85 | 100 | 0.1728 | 0.6583 | 0.5646 | 0.6079 | 0.9594 | | No log | 2.31 | 125 | 0.1787 | 0.7050 | 0.5718 | 0.6314 | 0.9619 | | No log | 2.78 | 150 | 0.2051 | 0.6979 | 0.5251 | 0.5993 | 0.9587 | | No log | 3.24 | 175 | 0.1755 | 0.7172 | 0.5945 | 0.6501 | 0.9621 | | No log | 3.7 | 200 | 0.1720 | 0.6943 | 0.6304 | 0.6608 | 0.9645 | | No log | 4.17 | 225 | 0.1873 | 0.7203 | 0.6316 | 0.6730 | 0.9646 | | No log | 4.63 | 250 | 0.1781 | 0.6934 | 0.6196 | 0.6545 | 0.9638 | | No log | 5.09 | 275 | 0.1953 | 0.7040 | 0.6172 | 0.6577 | 0.9631 | | No log | 5.56 | 300 | 0.1953 | 0.7223 | 0.6316 | 0.6739 | 0.9642 | | No log | 6.02 | 325 | 0.1839 | 0.7008 | 0.6471 | 0.6729 | 0.9648 | | No log | 6.48 | 350 | 0.1995 | 0.716 | 0.6423 | 0.6772 | 0.9650 | | No log | 6.94 | 375 | 0.2056 | 0.7251 | 0.6184 | 0.6675 | 0.9640 | | No log | 7.41 | 400 | 0.2044 | 0.7065 | 0.6220 | 0.6616 | 0.9640 | | No log | 7.87 | 425 | 0.2042 | 0.7201 | 0.6400 | 0.6776 | 0.9650 | | No log | 8.33 | 450 | 0.2247 | 0.7280 | 0.6244 | 0.6722 | 0.9638 | | No log | 8.8 | 475 | 0.2060 | 0.7064 | 0.6447 | 0.6742 | 0.9649 | | 0.0675 | 9.26 | 500 | 0.2152 | 0.7112 | 0.6244 | 0.6650 | 0.9643 | | 0.0675 | 9.72 | 525 | 0.2086 | 0.7070 | 0.6495 | 0.6771 | 0.9650 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.0 - Datasets 2.3.2 - Tokenizers 0.12.1
Evelyn18/distilbert-base-uncased-becas-4
Evelyn18
2022-07-05T21:55:19Z
20
0
transformers
[ "transformers", "pytorch", "tensorboard", "distilbert", "question-answering", "generated_from_trainer", "dataset:becasv2", "license:apache-2.0", "endpoints_compatible", "region:us" ]
question-answering
2022-07-01T02:20:43Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - becasv2 model-index: - name: distilbert-base-uncased-becas-4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-becas-4 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the becasv2 dataset. It achieves the following results on the evaluation set: - Loss: 3.1357 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 10 - eval_batch_size: 10 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 9 | 4.9618 | | No log | 2.0 | 18 | 4.1071 | | No log | 3.0 | 27 | 3.5438 | | No log | 4.0 | 36 | 3.2115 | | No log | 5.0 | 45 | 2.9524 | | No log | 6.0 | 54 | 3.0645 | | No log | 7.0 | 63 | 2.9351 | | No log | 8.0 | 72 | 3.1037 | | No log | 9.0 | 81 | 3.1132 | | No log | 10.0 | 90 | 3.1357 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
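A small extractive-QA sketch; the Spanish question/context pair is invented in the spirit of the becasv2 (scholarship) data rather than taken from it:

```python
from transformers import pipeline

qa = pipeline("question-answering", model="Evelyn18/distilbert-base-uncased-becas-4")
result = qa(
    question="¿Cuándo cierra la convocatoria de becas?",
    context="La convocatoria de becas para estudiantes de posgrado cierra el 15 de agosto.",
)
print(result["answer"], result["score"])
```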
rbawden/CCASS-semi-auto-titrages-base
rbawden
2022-07-05T21:42:57Z
16
1
transformers
[ "transformers", "pytorch", "fsmt", "fr", "license:cc-by-4.0", "endpoints_compatible", "region:us" ]
null
2022-06-16T09:32:27Z
--- language: fr license: cc-by-4.0 --- # Cour de Cassation semi-automatic *titrage* prediction model Model for the semi-automatic prediction of *titrages* (keyword sequence) from *sommaires* (synthesis of legal cases). The models are similar to the automatic models described in [this paper](https://hal.inria.fr/hal-03663110/file/LREC_2022___CCass_Inria-camera-ready.pdf) and to the model available [here](https://huggingface.co/rbawden/CCASS-pred-titrages-base). If you use this semi-automatic model, please cite our research paper (see [below](#cite)). ## Model description The model is a transformer-base model trained on parallel data (sommaires-titrages) provided by the Cour de Cassation. The model was initially trained using the Fairseq toolkit, converted to HuggingFace and then fine-tuned on the original training data to smooth out minor differences that arose during the conversion process. Tokenisation is performed using a SentencePiece model, the BPE strategy and a vocab size of 8000. ### Intended uses & limitations This model is to be used to help in the production of *titrages* for those *sommaires* that do not have them or to complement existing (manually) created *titrages*. ### How to use Contrary to the [automatic *titrage* prediction model](https://huggingface.co/rbawden/CCASS-pred-titrages-base) (designed to predict the entire sequence), this model is designed to help in the manual production of *titrages*, by proposing the next *titre* (keyword) in the sequence given a *sommaire* and the beginning of the *titrage*. Model input is the *matière* (matter) concatenated to the *titres* already decided on (separated by <t>), concatenated to the text from the sommaire separated by the token `<t>`. Each example should be on a single line. E.g. `bail <t> résiliation <t> causes <t> La recommendation du tribunal selon l'article...` (fictive example for illustrative purposes, where the matter=bail and the beginning of the *titrage*=résiliation <t> causes). The maximum input length of the model is 1024 input tokens (after tokenisation).
```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokeniser = AutoTokenizer.from_pretrained("rbawden/CCASS-semi-auto-titrages-base")
model = AutoModelForSeq2SeqLM.from_pretrained("rbawden/CCASS-semi-auto-titrages-base")

matiere_and_titrage_prefix = "matter <t> titre"
sommaire = "full text from the sommaire on a single line"

inputs = tokeniser([matiere_and_titrage_prefix + " <t> " + sommaire], return_tensors='pt')
outputs = model.generate(inputs['input_ids'])
tokeniser.batch_decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
```
### Limitations and bias The models' predictions should not be taken as ground-truth *titrages* and the final decision should be the expert's. The model is not constrained to predict *titres* that have previously been seen, so this should be taken into account in the deployment of this model as a *titrage* tool in order to avoid the multiplication of different *titres*. ## Training data Training data is provided by the Cour de Cassation (the original source being Jurinet data, but with pseudo-anonymisation applied). For training, we use a total of 159,836 parallel examples (each example is a sommaire-titrage pair). Our development data consists of 1,833 held-out examples. ## Training procedure ### Preprocessing We use SentencePiece, the BPE strategy and a joint vocabulary of 8000 tokens. This model was converted into the HuggingFace format and integrates a number of normalisation processes (e.g. 
removing double doubles, apostrophes and quotes, normalisation of different accent formats, lowercasing). ### Training The model was initially trained using Fairseq until convergence on the development set (according to our customised weighted accuracy measure - please see [the paper](https://hal.inria.fr/hal-03663110/file/LREC_2022___CCass_Inria-camera-ready.pdf) for more details). The model was then converted to HuggingFace and training continued to smooth out incoherences introduced during the conversion procedure (incompatibilities in the way the SentencePiece and NMT vocabularies are defined, linked to HuggingFace vocabularies being necessarily the same as the tokeniser vocabulary, a constraint that is not imposed in Fairseq). ### Evaluation results Full results for the initial (automatic) Fairseq models can be found in [the paper](https://hal.inria.fr/hal-03663110/file/LREC_2022___CCass_Inria-camera-ready.pdf). Results on this semi-automatic model coming soon! ## BibTeX entry and citation info <a name="cite"></a> If you use this work, please cite the following article: Thibault Charmet, Inès Cherichi, Matthieu Allain, Urszula Czerwinska, Amaury Fouret, Benoît Sagot and Rachel Bawden, 2022. [**Complex Labelling and Similarity Prediction in Legal Texts: Automatic Analysis of France’s Court of Cassation Rulings**](http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.509.pdf). In Proceedings of the 13th Language Resources and Evaluation Conference, Marseille, France.
```
@inproceedings{charmet-et-al-2022-complex,
    title = {Complex Labelling and Similarity Prediction in Legal Texts: Automatic Analysis of France’s Court of Cassation Rulings},
    author = {Charmet, Thibault and Cherichi, Inès and Allain, Matthieu and Czerwinska, Urszula and Fouret, Amaury and Sagot, Benoît and Bawden, Rachel},
    booktitle = {Proceedings of the 13th Language Resources and Evaluation Conference},
    year = {2022},
    address = {Marseille, France},
    pages = {4754--4766},
    url = {http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.509.pdf}
}
```
rbawden/CCASS-auto-titrages-base
rbawden
2022-07-05T21:42:01Z
11
1
transformers
[ "transformers", "pytorch", "fsmt", "fr", "license:cc-by-4.0", "endpoints_compatible", "region:us" ]
null
2022-06-03T15:36:54Z
--- language: fr license: cc-by-4.0 --- # Cour de Cassation automatic *titrage* prediction model Model for the automatic prediction of *titrages* (keyword sequence) from *sommaires* (synthesis of legal cases). The models are described in [this paper](https://hal.inria.fr/hal-03663110/file/LREC_2022___CCass_Inria-camera-ready.pdf). If you use this model, please cite our research paper (see [below](#cite)). ## Model description The model is a transformer-base model trained on parallel data (sommaires-titrages) provided by the Cour de Cassation. The model was initially trained using the Fairseq toolkit, converted to HuggingFace and then fine-tuned on the original training data to smooth out minor differences that arose during the conversion process. Tokenisation is performed using a SentencePiece model, the BPE strategy and a vocab size of 8000. ### Intended uses & limitations This model is to be used to produce *titrages* for those *sommaires* that do not have them or to complement existing (manually) created *titrages*. ### How to use Model input is the *matière* (matter) concatenated to the text from the sommaire separated by the token `<t>`. Each example should be on a single line. E.g. `bail <t> La recommendation du tribunal selon l'article...` (fictive example for illustrative purposes). The maximum input length of the model is 1024 input tokens (after tokenisation).
```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokeniser = AutoTokenizer.from_pretrained("rbawden/CCASS-auto-titrages-base")
model = AutoModelForSeq2SeqLM.from_pretrained("rbawden/CCASS-auto-titrages-base")

matiere = "matter"
sommaire = "full text from the sommaire on a single line"

inputs = tokeniser([matiere + " <t> " + sommaire], return_tensors='pt')
outputs = model.generate(inputs['input_ids'])
tokeniser.batch_decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
```
### Limitations and bias The models' predictions should not be taken as ground-truth *titrages* and should always be indicated as being automatically generated. They were designed not to be used as such, but to improve search coverage for improved similarity prediction between different cases (the predicted *titrages* being used to predict the similarity). The model is not constrained to predict *titres* that have previously been seen, so this should be taken into account in the deployment of this model as a *titrage* tool in order to avoid the multiplication of different *titres*. ## Training data Training data is provided by the Cour de Cassation (the original source being Jurinet data, but with pseudo-anonymisation applied). For training, we use a total of 159,836 parallel examples (each example is a sommaire-titrage pair). Our development data consists of 1,833 held-out examples. ## Training procedure ### Preprocessing We use SentencePiece, the BPE strategy and a joint vocabulary of 8000 tokens. This model was converted into the HuggingFace format and integrates a number of normalisation processes (e.g. removing double doubles, apostrophes and quotes, normalisation of different accent formats, lowercasing). ### Training The model was initially trained using Fairseq until convergence on the development set (according to our customised weighted accuracy measure - please see [the paper](https://hal.inria.fr/hal-03663110/file/LREC_2022___CCass_Inria-camera-ready.pdf) for more details). 
The model was then converted to HuggingFace and training continued to smooth out incoherences introduced during the conversion procedure (incompatibilities in the way the SentencePiece and NMT vocabularies are defined, linked to HuggingFace vocabularies being necessarily the same as the tokeniser vocabulary, a constraint that is not imposed in Fairseq). ### Evaluation results Full results for the initial Fairseq models can be found in [the paper](https://hal.inria.fr/hal-03663110/file/LREC_2022___CCass_Inria-camera-ready.pdf). Results on this converted model coming soon! ## BibTeX entry and citation info <a name="cite"></a> If you use this work, please cite the following article: Thibault Charmet, Inès Cherichi, Matthieu Allain, Urszula Czerwinska, Amaury Fouret, Benoît Sagot and Rachel Bawden, 2022. [**Complex Labelling and Similarity Prediction in Legal Texts: Automatic Analysis of France’s Court of Cassation Rulings**](http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.509.pdf). In Proceedings of the 13th Language Resources and Evaluation Conference, Marseille, France.
```
@inproceedings{charmet-et-al-2022-complex,
    title = {Complex Labelling and Similarity Prediction in Legal Texts: Automatic Analysis of France’s Court of Cassation Rulings},
    author = {Charmet, Thibault and Cherichi, Inès and Allain, Matthieu and Czerwinska, Urszula and Fouret, Amaury and Sagot, Benoît and Bawden, Rachel},
    booktitle = {Proceedings of the 13th Language Resources and Evaluation Conference},
    year = {2022},
    address = {Marseille, France},
    pages = {4754--4766},
    url = {http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.509.pdf}
}
```
Krisna/finetuning-sentiment-model-3000-samples
Krisna
2022-07-05T20:14:31Z
5
0
transformers
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2022-07-05T15:42:13Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: finetuning-sentiment-model-3000-samples results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-3000-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.3366 - Accuracy: 0.86 - F1: 0.8636 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Tokenizers 0.12.1
aspis/data2vec-text-finetuned-squad2
aspis
2022-07-05T20:03:52Z
4
0
transformers
[ "transformers", "pytorch", "tensorboard", "data2vec-text", "question-answering", "generated_from_trainer", "dataset:squad_v2", "license:mit", "endpoints_compatible", "region:us" ]
question-answering
2022-07-05T10:58:52Z
--- license: mit tags: - generated_from_trainer datasets: - squad_v2 model-index: - name: data2vec-text-finetuned-squad2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # data2vec-text-finetuned-squad2 This model is a fine-tuned version of [facebook/data2vec-text-base](https://huggingface.co/facebook/data2vec-text-base) on the squad_v2 dataset. It achieves the following results on the evaluation set: - Loss: 1.1044 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 1.0173 | 1.0 | 8239 | 0.9629 | | 0.7861 | 2.0 | 16478 | 1.0098 | | 0.6402 | 3.0 | 24717 | 1.1044 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
pm390/Reinforce-pong-01
pm390
2022-07-05T19:49:27Z
0
0
null
[ "Pong-PLE-v0", "reinforce", "reinforcement-learning", "custom-implementation", "deep-rl-class", "model-index", "region:us" ]
reinforcement-learning
2022-07-05T19:49:16Z
--- tags: - Pong-PLE-v0 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-pong-01 results: - metrics: - type: mean_reward value: -16.00 +/- 0.00 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: Pong-PLE-v0 type: Pong-PLE-v0 --- # **Reinforce** Agent playing **Pong-PLE-v0** This is a trained model of a **Reinforce** agent playing **Pong-PLE-v0** . To learn to use this model and train yours check Unit 5 of the Deep Reinforcement Learning Class: https://github.com/huggingface/deep-rl-class/tree/main/unit5
BigTimeCoderSean/ppo-LunarLander-v2
BigTimeCoderSean
2022-07-05T19:29:40Z
1
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-07-05T19:29:07Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 191.74 +/- 31.06 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```