Upload folder using huggingface_hub
- config.json +44 -0
- generation_config.json +8 -0
- onnx/decoder_model.onnx +3 -0
- onnx/decoder_model_merged.onnx +3 -0
- onnx/decoder_with_past_model.onnx +3 -0
- onnx/encoder_model.onnx +3 -0
- preprocessor_config.json +12 -0
- sentencepiece.bpe.model +3 -0
- special_tokens_map.json +6 -0
- tokenizer_config.json +51 -0
- vocab.json +0 -0
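A commit with this shape is typically produced by the `upload_folder` API from `huggingface_hub` (large binaries such as the ONNX files are stored as Git LFS pointers automatically). A minimal sketch, assuming the files above sit in a local folder and using a placeholder repo id:

```python
from huggingface_hub import upload_folder

# Placeholder local folder and repo id; substitute your own.
upload_folder(
    folder_path="./tiny-random-speech2text-onnx",
    repo_id="your-username/tiny-random-speech2text-onnx",
    commit_message="Upload folder using huggingface_hub",
)
```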
config.json
ADDED
@@ -0,0 +1,44 @@
+{
+  "_attn_implementation_autoset": true,
+  "_name_or_path": "hf-internal-testing/tiny-random-Speech2TextForConditionalGeneration",
+  "activation_dropout": 0.0,
+  "activation_function": "relu",
+  "architectures": [
+    "Speech2TextForConditionalGeneration"
+  ],
+  "attention_dropout": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.0,
+  "conv_channels": 32,
+  "conv_kernel_sizes": [
+    5,
+    5
+  ],
+  "d_model": 16,
+  "decoder_attention_heads": 4,
+  "decoder_ffn_dim": 4,
+  "decoder_layerdrop": 0.0,
+  "decoder_layers": 2,
+  "decoder_start_token_id": 2,
+  "dropout": 0.1,
+  "encoder_attention_heads": 4,
+  "encoder_ffn_dim": 4,
+  "encoder_layerdrop": 0.0,
+  "encoder_layers": 2,
+  "eos_token_id": 2,
+  "init_std": 0.02,
+  "input_channels": 1,
+  "input_feat_per_channel": 24,
+  "is_encoder_decoder": true,
+  "max_position_embeddings": 20,
+  "max_source_positions": 20,
+  "max_target_positions": 20,
+  "model_type": "speech_to_text",
+  "num_conv_layers": 2,
+  "num_hidden_layers": 2,
+  "pad_token_id": 1,
+  "scale_embedding": true,
+  "transformers_version": "4.48.2",
+  "use_cache": true,
+  "vocab_size": 10000
+}
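This config describes a deliberately tiny Speech2Text encoder-decoder (16-dim model, 2 layers per side, 10000-token vocab), sized for testing rather than real transcription. A minimal sketch of inspecting it, again with a placeholder repo id:

```python
from transformers import AutoConfig

# Placeholder repo id; point this at the actual repository.
config = AutoConfig.from_pretrained("your-username/tiny-random-speech2text-onnx")
print(config.model_type)      # speech_to_text
print(config.d_model)         # 16
print(config.encoder_layers)  # 2
```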
generation_config.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 0,
+  "decoder_start_token_id": 2,
+  "eos_token_id": 2,
+  "pad_token_id": 1,
+  "transformers_version": "4.48.2"
+}
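The generation defaults simply mirror the special-token ids from config.json. A sketch of loading them on their own, placeholder repo id as before:

```python
from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained("your-username/tiny-random-speech2text-onnx")
print(gen.decoder_start_token_id)  # 2
print(gen.eos_token_id)            # 2
```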
onnx/decoder_model.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef5b3b68e1278db0f6020b54b848cbb60f18afcb8e025c1d1f34a61735654fb3
+size 756105
onnx/decoder_model_merged.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4aeceef187b3a792b41024be7bf78dba8fc9d0d126259340fb37aa178901ebda
+size 836880
onnx/decoder_with_past_model.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:94ef2ede6424dde0cee31ebdb149e5c6eb5eb7a78916c09b50d1bb696876d337
+size 733413
onnx/encoder_model.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7050a966f1d76c3d32e8f653ca5b1fc205b5cda526044e080d88f4d6b9789b67
+size 94142
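The four ONNX files above are Git LFS pointers following the usual Optimum export layout: a plain decoder, a KV-cache (`with_past`) decoder, a merged decoder covering both paths, and the encoder. A hedged sketch of loading them through Optimum's ONNX Runtime integration, assuming `optimum[onnxruntime]` is installed and that the `onnx/` layout is picked up via the `subfolder` argument:

```python
from optimum.onnxruntime import ORTModelForSpeechSeq2Seq

# Placeholder repo id; the subfolder argument is an assumption about
# how this repo's onnx/ directory is resolved.
model = ORTModelForSpeechSeq2Seq.from_pretrained(
    "your-username/tiny-random-speech2text-onnx",
    subfolder="onnx",
)
```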
preprocessor_config.json
ADDED
@@ -0,0 +1,12 @@
+{
+  "do_ceptral_normalize": true,
+  "feature_extractor_type": "Speech2TextFeatureExtractor",
+  "feature_size": 24,
+  "normalize_means": true,
+  "normalize_vars": true,
+  "num_mel_bins": 24,
+  "padding_side": "right",
+  "padding_value": 0.0,
+  "return_attention_mask": true,
+  "sampling_rate": 16000
+}
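The preprocessor expects 16 kHz mono audio and emits 24 mel-bin filter-bank features with utterance-level mean/variance normalization (`do_ceptral_normalize` is spelled this way in transformers itself, so the key is kept verbatim). A sketch of running it on dummy audio, placeholder repo id:

```python
import numpy as np
from transformers import AutoFeatureExtractor

# Placeholder repo id; substitute the actual repository.
extractor = AutoFeatureExtractor.from_pretrained("your-username/tiny-random-speech2text-onnx")

audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = extractor(audio, sampling_rate=16000, return_tensors="np")
print(inputs["input_features"].shape)  # (1, num_frames, 24)
```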
sentencepiece.bpe.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:052a168787a9160b4b2ba54e4995e9600298812c34191ca3f70cea51cd4f5c1e
+size 416684
special_tokens_map.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "pad_token": "<pad>",
+  "unk_token": "<unk>"
+}
tokenizer_config.json
ADDED
@@ -0,0 +1,51 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [],
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "do_lower_case": true,
+  "do_upper_case": false,
+  "eos_token": "</s>",
+  "extra_special_tokens": {},
+  "lang_codes": null,
+  "model_max_length": 20,
+  "pad_token": "<pad>",
+  "processor_class": "Speech2TextProcessor",
+  "sp_model_kwargs": {},
+  "tgt_lang": null,
+  "tokenizer_class": "Speech2TextTokenizer",
+  "unk_token": "<unk>"
+}
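Together with sentencepiece.bpe.model and special_tokens_map.json, this wires up a Speech2TextTokenizer with ids 0-3 reserved for `<s>`, `<pad>`, `</s>`, and `<unk>`, bundled behind a Speech2TextProcessor. A sketch of loading the combined processor, placeholder repo id:

```python
from transformers import AutoProcessor

# Placeholder repo id; AutoProcessor resolves the processor_class above.
processor = AutoProcessor.from_pretrained("your-username/tiny-random-speech2text-onnx")
ids = processor.tokenizer("hello world").input_ids
print(processor.tokenizer.decode(ids))
```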
vocab.json
ADDED
The diff for this file is too large to render; see the raw file.
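Putting the files together, a rough end-to-end sketch under the same assumptions (placeholder repo id, dummy audio, Optimum's ONNX Runtime backend); the transcription is noise since the weights are random:

```python
import numpy as np
from optimum.onnxruntime import ORTModelForSpeechSeq2Seq
from transformers import AutoProcessor

repo = "your-username/tiny-random-speech2text-onnx"  # placeholder
processor = AutoProcessor.from_pretrained(repo)
model = ORTModelForSpeechSeq2Seq.from_pretrained(repo, subfolder="onnx")

audio = np.zeros(16000, dtype=np.float32)  # dummy 16 kHz input
inputs = processor(audio, sampling_rate=16000, return_tensors="pt")
generated = model.generate(inputs["input_features"], max_new_tokens=5)
print(processor.batch_decode(generated, skip_special_tokens=True))
```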