SystemAdmin123 committed
Commit f8b226a · verified · 1 Parent(s): 8f76511

Training in progress, step 40

axolotl_config.yaml CHANGED
@@ -1,5 +1,5 @@
- base_model: unsloth/OpenHermes-2.5-Mistral-7B
- batch_size: 16
+ base_model: facebook/opt-125m
+ batch_size: 128
  bf16: true
  chat_template: tokenizer_default_fallback_alpaca
  datasets:
@@ -25,7 +25,7 @@ learning_rate: 0.0002
  logging_steps: 10
  lr_scheduler: cosine
  max_steps: 2500
- micro_batch_size: 2
+ micro_batch_size: 16
  model_type: AutoModelForCausalLM
  num_epochs: 100
  optimizer: adamw_bnb_8bit
@@ -36,13 +36,13 @@ sample_packing: true
  save_steps: 40
  save_total_limit: 2
  sequence_len: 2048
- tokenizer_type: LlamaTokenizerFast
+ tokenizer_type: GPT2TokenizerFast
  torch_dtype: bf16
  trust_remote_code: true
  val_set_size: 0.1
  wandb_entity: ''
  wandb_mode: online
- wandb_name: unsloth/OpenHermes-2.5-Mistral-7B-argilla/databricks-dolly-15k-curated-en
+ wandb_name: facebook/opt-125m-argilla/databricks-dolly-15k-curated-en
  wandb_project: Gradients-On-Demand
  wandb_run: your_name
  wandb_runid: default
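
The batch settings above interact: the total batch_size per optimizer step is reached by accumulating micro_batch_size-sized forward passes across devices. A minimal sketch of that arithmetic, assuming a single GPU (the helper name is mine, not part of the commit):

# Minimal sketch of how the Axolotl-style batch settings relate; the helper
# name and the single-GPU assumption are illustrative, not from the commit.
def gradient_accumulation_steps(batch_size: int, micro_batch_size: int,
                                world_size: int = 1) -> int:
    """Steps to accumulate so batch_size == micro_batch_size * world_size * steps."""
    assert batch_size % (micro_batch_size * world_size) == 0
    return batch_size // (micro_batch_size * world_size)

# New config: 128 total / 16 per pass -> 8 accumulation steps per optimizer step.
print(gradient_accumulation_steps(128, 16))  # 8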
config.json CHANGED
@@ -1,28 +1,31 @@
  {
- "_name_or_path": "unsloth/OpenHermes-2.5-Mistral-7B",
+ "_name_or_path": "facebook/opt-125m",
+ "_remove_final_layer_norm": false,
+ "activation_dropout": 0.0,
+ "activation_function": "relu",
  "architectures": [
- "MistralForCausalLM"
+ "OPTForCausalLM"
  ],
  "attention_dropout": 0.0,
- "bos_token_id": 1,
- "eos_token_id": 32000,
- "head_dim": 128,
- "hidden_act": "silu",
- "hidden_size": 4096,
- "initializer_range": 0.02,
- "intermediate_size": 14336,
- "max_position_embeddings": 32768,
- "model_type": "mistral",
- "num_attention_heads": 32,
- "num_hidden_layers": 32,
- "num_key_value_heads": 8,
- "pad_token_id": 0,
- "rms_norm_eps": 1e-05,
- "rope_theta": 10000.0,
- "sliding_window": 4096,
- "tie_word_embeddings": false,
+ "bos_token_id": 2,
+ "do_layer_norm_before": true,
+ "dropout": 0.1,
+ "enable_bias": true,
+ "eos_token_id": 2,
+ "ffn_dim": 3072,
+ "hidden_size": 768,
+ "init_std": 0.02,
+ "layer_norm_elementwise_affine": true,
+ "layerdrop": 0.0,
+ "max_position_embeddings": 2048,
+ "model_type": "opt",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 1,
+ "prefix": "</s>",
  "torch_dtype": "bfloat16",
  "transformers_version": "4.48.1",
  "use_cache": false,
- "vocab_size": 32002
+ "vocab_size": 50265,
+ "word_embed_proj_dim": 768
  }
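
A quick way to sanity-check the swapped architecture is to load the published config through transformers and compare it with the values in the diff; a sketch, assuming transformers is installed and the Hub is reachable:

# Sanity check of the new model geometry against the diff above.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("facebook/opt-125m")
assert cfg.model_type == "opt"
assert cfg.hidden_size == 768 and cfg.num_hidden_layers == 12
assert cfg.ffn_dim == 3072 and cfg.vocab_size == 50265
print(cfg.architectures)  # ['OPTForCausalLM']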
merges.txt CHANGED
The diff for this file is too large to render. See raw diff
 
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:87e0feac30a2b7a0f3658bfbd058914e27b5492b737a7f09f8b8bcbcb91e35df
- size 136062744
+ oid sha256:020fb30959dcfbcac82451f41a438ae05e3b42759d7aef54dbd8b1b18fa92a0b
+ size 250490408
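
The new checkpoint size is consistent with opt-125m stored in bfloat16, at roughly two bytes per parameter. Back-of-envelope arithmetic (mine, not from the commit):

# Rough size check: bf16 stores 2 bytes per weight, so the pointer's byte
# count should be about twice the parameter count of opt-125m.
size_bytes = 250_490_408            # "size" field of the new LFS pointer
approx_params = size_bytes / 2      # bfloat16 -> 2 bytes per parameter
print(f"~{approx_params / 1e6:.0f}M parameters")  # ~125M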
special_tokens_map.json CHANGED
@@ -1,29 +1,29 @@
  {
  "bos_token": {
- "content": "<s>",
+ "content": "</s>",
  "lstrip": false,
- "normalized": false,
+ "normalized": true,
  "rstrip": false,
  "single_word": false
  },
  "eos_token": {
- "content": "<|im_end|>",
+ "content": "</s>",
  "lstrip": false,
- "normalized": false,
+ "normalized": true,
  "rstrip": false,
  "single_word": false
  },
  "pad_token": {
- "content": "<unk>",
+ "content": "<pad>",
  "lstrip": false,
- "normalized": false,
+ "normalized": true,
  "rstrip": false,
  "single_word": false
  },
  "unk_token": {
- "content": "<unk>",
+ "content": "</s>",
  "lstrip": false,
- "normalized": false,
+ "normalized": true,
  "rstrip": false,
  "single_word": false
  }
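
OPT's tokenizer reuses </s> for bos, eos, and unk, with a dedicated <pad> token, which is exactly what the new map encodes. A sketch that confirms this from the base tokenizer (assuming Hub access):

# Confirm the special-token layout shown in the new map.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("facebook/opt-125m")
print(tok.bos_token, tok.eos_token, tok.unk_token, tok.pad_token)
# Expected: </s> </s> </s> <pad>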
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:04222cd76979c181cd3f72c3bf6982fe2a09d9f4b8f23d82902efde18f1d0668
- size 3506125
+ oid sha256:4a80800503667fe0bd639ad10c33879f747ba1582f369b06abb21f3f65d5ad3b
+ size 3558658
tokenizer_config.json CHANGED
@@ -1,20 +1,11 @@
  {
  "add_bos_token": true,
- "add_eos_token": false,
- "add_prefix_space": true,
+ "add_prefix_space": false,
  "added_tokens_decoder": {
- "0": {
- "content": "<unk>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
  "1": {
- "content": "<s>",
+ "content": "<pad>",
  "lstrip": false,
- "normalized": false,
+ "normalized": true,
  "rstrip": false,
  "single_word": false,
  "special": true
@@ -22,43 +13,21 @@
  "2": {
  "content": "</s>",
  "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "32000": {
- "content": "<|im_end|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "32001": {
- "content": "<|im_start|>",
- "lstrip": false,
- "normalized": false,
+ "normalized": true,
  "rstrip": false,
  "single_word": false,
  "special": true
  }
  },
- "additional_special_tokens": [],
- "bos_token": "<s>",
- "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+ "bos_token": "</s>",
+ "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{ '### Instruction: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ '### Response: ' + message['content'] + eos_token}}{% endif %}{% endfor %}",
  "clean_up_tokenization_spaces": false,
- "eos_token": "<|im_end|>",
+ "eos_token": "</s>",
+ "errors": "replace",
  "extra_special_tokens": {},
- "legacy": true,
- "model_max_length": 32768,
- "pad_token": "<unk>",
- "padding_side": "right",
- "sp_model_kwargs": {},
- "spaces_between_special_tokens": false,
- "tokenizer_class": "LlamaTokenizer",
- "trust_remote_code": false,
- "unk_token": "<unk>",
- "use_default_system_prompt": true,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "<pad>",
+ "tokenizer_class": "GPT2Tokenizer",
+ "unk_token": "</s>",
  "use_fast": true
  }
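
The new chat_template renders plain Alpaca-style Instruction/Response text rather than ChatML. A sketch of what it produces (the example messages are made up, and the template is attached manually in case the base tokenizer ships without one):

# Render the commit's Alpaca-style template; example messages are illustrative.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("facebook/opt-125m")
tok.chat_template = (  # template string copied from the diff above
    "{% for message in messages %}{% if message['role'] == 'user' %}"
    "{{ '### Instruction: ' + message['content'] + '\n\n' }}"
    "{% elif message['role'] == 'assistant' %}"
    "{{ '### Response: ' + message['content'] + eos_token }}"
    "{% endif %}{% endfor %}"
)
messages = [
    {"role": "user", "content": "Name three primes."},
    {"role": "assistant", "content": "2, 3, 5"},
]
print(tok.apply_chat_template(messages, tokenize=False))
# ### Instruction: Name three primes.
#
# ### Response: 2, 3, 5</s>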
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:53ebcd35271d188c087a5f00ee35959f144cc8fbcdfc0d1744678c3f065510bd
- size 6776
+ oid sha256:f3d5f1e240ecc8a87b6c5953fd6de84410c7aff5f9646db311ea17ff72210b93
+ size 6840
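
training_args.bin is a pickled TrainingArguments object rather than a tensor file, so it can be inspected directly once downloaded. A sketch, assuming a trusted local copy (weights_only=False unpickles arbitrary objects and is only safe for files you trust):

# Inspect the serialized TrainingArguments; requires torch and transformers.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)  # TrainingArguments
print(args.per_device_train_batch_size, args.save_steps)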
vocab.json CHANGED
The diff for this file is too large to render. See raw diff