Training in progress, step 40

Changed files:
- axolotl_config.yaml +0 -7
- config.json +1 -2
- ds_config.yml +1 -1
- model.safetensors +2 -2
- training_args.bin +1 -1

axolotl_config.yaml CHANGED
@@ -1,6 +1,4 @@
-adapter: lora
 base_model: JackFram/llama-68m
-bf16: true
 chat_template: tokenizer_default_fallback_alpaca
 datasets:
 - format: custom
@@ -25,10 +23,6 @@ hub_model_id: SystemAdmin123/test-repo
 hub_strategy: checkpoint
 learning_rate: 0.0002
 logging_steps: 10
-lora_alpha: 256
-lora_dropout: 0.1
-lora_r: 128
-lora_target_linear: true
 max_steps: 2500
 micro_batch_size: 1
 num_epochs: 100
@@ -42,7 +36,6 @@ sequence_len: 2048
 special_tokens:
   pad_token: </s>
 tokenizer_type: LlamaTokenizerFast
-torch_dtype: bf16
 trust_remote_code: true
 val_set_size: 0.1
 wandb_entity: ''
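This commit drops all adapter settings, so the run switches from LoRA training to a full fine-tune of JackFram/llama-68m. For reference, here is a minimal sketch (not part of this repo; it uses peft's public API, with "all-linear" standing in for axolotl's lora_target_linear: true) of the adapter the removed lines described:

```python
# Sketch: the removed axolotl LoRA settings expressed as a peft LoraConfig.
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained("JackFram/llama-68m")

lora = LoraConfig(
    r=128,                        # lora_r in the removed config
    lora_alpha=256,               # lora_alpha
    lora_dropout=0.1,             # lora_dropout
    target_modules="all-linear",  # lora_target_linear: true
    task_type="CAUSAL_LM",
)

model = get_peft_model(base, lora)  # wrap the base model with LoRA adapters
model.print_trainable_parameters()  # only the adapter weights stay trainable
```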
config.json CHANGED
@@ -1,5 +1,4 @@
 {
-  "_attn_implementation_autoset": true,
   "_name_or_path": "JackFram/llama-68m",
   "architectures": [
     "LlamaForCausalLM"
@@ -25,7 +24,7 @@
   "rope_scaling": null,
   "rope_theta": 10000.0,
   "tie_word_embeddings": false,
-  "torch_dtype": "
+  "torch_dtype": "bfloat16",
   "transformers_version": "4.48.1",
   "use_cache": false,
   "vocab_size": 32000
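With config.json now declaring bfloat16, the checkpoint can be loaded in that dtype directly. A minimal sketch, assuming the standard transformers API and the hub_model_id from the axolotl config:

```python
# Sketch: load the uploaded checkpoint in the dtype config.json now declares.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained(
    "SystemAdmin123/test-repo",    # hub_model_id from the axolotl config
    torch_dtype=torch.bfloat16,    # matches "torch_dtype": "bfloat16"
    trust_remote_code=True,        # mirrors trust_remote_code: true
)
tokenizer = AutoTokenizer.from_pretrained("SystemAdmin123/test-repo")
```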
ds_config.yml CHANGED
@@ -1 +1 @@
-{"train_micro_batch_size_per_gpu": 1, "gradient_accumulation_steps": 32, "steps_per_print": 200, "bf16": {"enabled":
+{"train_micro_batch_size_per_gpu": 1, "gradient_accumulation_steps": 32, "steps_per_print": 200, "bf16": {"enabled": false}, "zero_optimization": {"stage": 2, "allgather_partitions": true, "reduce_scatter": true, "overlap_comm": true, "contiguous_gradients": true, "reduce_bucket_size": 5000000, "allgather_bucket_size": 5000000}, "optimizer": {"type": "torch.optim.AdamW", "params": {"lr": 0.0002, "betas": [0.9, 0.999], "eps": 1e-08, "weight_decay": 0.01}}, "scheduler": {"type": "WarmupCosineSchedule", "params": {"warmup_min_lr": 0, "warmup_max_lr": 0.0002, "warmup_num_steps": 125}}}
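The completed DeepSpeed config pins the per-GPU micro batch size and gradient accumulation, so the effective global batch size follows directly from them. A minimal sketch (hypothetical helper, assuming single-GPU training) that reads the file and reports it:

```python
# Sketch: compute the effective global batch size from the DeepSpeed config,
# i.e. micro_batch_size * gradient_accumulation_steps * number_of_GPUs.
import json

with open("ds_config.yml") as f:  # the file holds JSON despite its .yml name
    ds = json.load(f)

world_size = 1  # assumption: a single GPU; set to your actual GPU count
effective_batch = (
    ds["train_micro_batch_size_per_gpu"]
    * ds["gradient_accumulation_steps"]
    * world_size
)
print(f"effective batch size: {effective_batch}")  # 1 * 32 * 1 = 32
```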
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:bb869319ab00023388d1e988acef0010027b3678790ccae9050ce5b80348b1f4
+size 136062744
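The new LFS pointer records both the sha256 oid and the byte size of the checkpoint, which is enough to verify a download. A minimal sketch of that check (a hypothetical verification step, not part of this repo):

```python
# Sketch: verify a downloaded model.safetensors against the Git LFS pointer.
import hashlib
import os

EXPECTED_OID = "bb869319ab00023388d1e988acef0010027b3678790ccae9050ce5b80348b1f4"
EXPECTED_SIZE = 136062744

path = "model.safetensors"
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"
assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("model.safetensors matches the LFS pointer")
```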
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:5414fad3ccf622c5fff1f84e82069b56b14de9a52dbbb9ddf1d853ff6aff2a29
 size 6840
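training_args.bin is the serialized TrainingArguments object that transformers' Trainer writes next to each checkpoint, so it can be loaded back for inspection. A minimal sketch, assuming a recent torch where weights_only defaults to True:

```python
# Sketch: inspect the serialized training arguments from this checkpoint.
import torch

# weights_only=False is needed because the file is a pickled object,
# not a plain tensor archive.
args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)  # e.g. TrainingArguments
print(args.learning_rate)   # should echo 0.0002 from the axolotl config
print(args.max_steps)       # should echo 2500
```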