Author: Marko Tasic
Checkpoint: out/pretrain-core/final (commit c2fc44c)
The full pretraining configuration (LitGPT format), starting with the model section:

```yaml
model_name: tangled-alpha-0.1-core
model_config:
  name: tangled-alpha-0.1-core
  hf_config: {}
  block_size: 131072
  n_layer: 32
  n_embd: 512
  vocab_size: 32064
  padding_multiple: 512
  padded_vocab_size: 32064
  norm_class_name: RMSNorm
  norm_eps: 1.0e-05
  norm_qk: false
  post_attention_norm: false
  post_mlp_norm: false
  parallel_residual: false
  shared_attention_norm: false
  n_head: 4
  head_size: 128
  n_query_groups: 4
  attn_bias: false
  rope_base: 500000
  rotary_percentage: 1.0
  rope_condense_ratio: 1
  rope_adjustments:
    factor: 32.0
    low_freq_factor: 1.0
    high_freq_factor: 4.0
    original_max_seq_len: 8192
  intermediate_size: 2688
  bias: false
  mlp_class_name: LLaMAMLP
  gelu_approximate: none
  n_expert: 0
  n_expert_per_token: 0
  scale_embeddings: false
  lm_head_bias: false
```
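For a sense of scale, the parameter count implied by this model section can be worked out directly from the fields above. This is a back-of-the-envelope sketch: it assumes the usual LLaMA-style block that `mlp_class_name: LLaMAMLP`, `norm_class_name: RMSNorm`, and the disabled biases suggest, plus the weight tying enabled by `tie_embeddings: true` in the training section below, and it ignores non-trainable buffers such as RoPE caches.

```python
# Rough parameter count from the model_config above.
# Assumes a LLaMA-style block (fused QKV + output projection,
# gate/up/down MLP, two RMSNorms per block), tied embeddings, no biases.
n_layer, n_embd, vocab_size = 32, 512, 32064
n_head, n_query_groups, head_size = 4, 4, 128
intermediate_size = 2688

embedding = vocab_size * n_embd                           # token embeddings, shared with lm_head
qkv = n_embd * (n_head + 2 * n_query_groups) * head_size  # query, key, value projections
attn_out = (n_head * head_size) * n_embd                  # attention output projection
mlp = 3 * n_embd * intermediate_size                      # gate, up, down (LLaMAMLP)
norms = 2 * n_embd                                        # attention + MLP RMSNorm weights

per_block = qkv + attn_out + mlp + norms
total = embedding + n_layer * per_block + n_embd          # + final RMSNorm

print(f"{total:,}")  # 182,125,056 -> roughly 182M parameters
```

Note that with `n_query_groups` equal to `n_head`, attention here is plain multi-head attention rather than grouped-query attention.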
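The four `rope_adjustments` fields match the Llama 3.1-style RoPE rescaling scheme, which stretches low-frequency rotary components so a model trained at `original_max_seq_len: 8192` can address positions up to the much larger `block_size`. A sketch of how such values are typically applied follows; it mirrors the published Llama 3.1 rule and is illustrative, not LitGPT's exact code path:

```python
import math

def rescale_rope_freqs(freqs, factor=32.0, low_freq_factor=1.0,
                       high_freq_factor=4.0, original_max_seq_len=8192):
    """Llama 3.1-style RoPE rescaling, using the rope_adjustments values above."""
    low_freq_wavelen = original_max_seq_len / low_freq_factor
    high_freq_wavelen = original_max_seq_len / high_freq_factor
    scaled = []
    for freq in freqs:
        wavelen = 2 * math.pi / freq
        if wavelen < high_freq_wavelen:        # high-frequency components: keep as-is
            scaled.append(freq)
        elif wavelen > low_freq_wavelen:       # low-frequency components: divide by factor
            scaled.append(freq / factor)
        else:                                  # smooth interpolation in between
            smooth = (original_max_seq_len / wavelen - low_freq_factor) / (
                high_freq_factor - low_freq_factor
            )
            scaled.append((1 - smooth) * freq / factor + smooth * freq)
    return scaled
```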
Run-level and data settings:

```yaml
out_dir: ../out/pretrain-core
precision: bf16-true
resume: auto
data:
  class_path: litgpt.data.LitData
  init_args:
    data_path: ../core-data-0-8192-2000/
    seed: 42
    num_workers: 32
```
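The `litgpt.data.LitData` module streams pre-tokenized samples from `data_path`, which is assumed here to hold a dataset prepared with the `litdata` package. A minimal sketch of what the wrapper amounts to, not LitGPT's actual code:

```python
from litdata import StreamingDataLoader, StreamingDataset

# Stream pre-tokenized samples from the optimized dataset directory.
dataset = StreamingDataset(
    input_dir="../core-data-0-8192-2000/",
    shuffle=True,
    seed=42,  # matches init_args.seed
)
loader = StreamingDataLoader(dataset, batch_size=2, num_workers=32)
```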
Training schedule:

```yaml
train:
  save_interval: 100
  log_interval: 1
  global_batch_size: 512
  micro_batch_size: 2
  lr_warmup_steps: 200
  max_tokens: 7318364160
  max_seq_length: 8192
  tie_embeddings: true
  max_norm: 1.0
  min_lr: 1.0e-05
```
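A few derived numbers fall straight out of the `train` block. With `global_batch_size: 512` and `max_seq_length: 8192`, each optimizer step consumes about 4.2M tokens, so `max_tokens: 7318364160` works out to roughly 1,745 optimizer steps; the gradient-accumulation count per device depends on what `devices: auto` resolves to:

```python
global_batch_size = 512
micro_batch_size = 2
max_seq_length = 8192
max_tokens = 7_318_364_160

tokens_per_step = global_batch_size * max_seq_length  # 4,194,304 tokens per optimizer step
total_steps = max_tokens / tokens_per_step            # ~1,744.9 optimizer steps

# Gradient accumulation per device for a few plausible device counts:
for devices in (1, 4, 8):
    accum = global_batch_size // (micro_batch_size * devices)
    print(devices, accum)  # 1 -> 256, 4 -> 64, 8 -> 32
```

At that step count, the 200 warmup steps cover roughly the first 11% of the run.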
Evaluation and optimizer settings:

```yaml
eval:
  interval: 50
  max_iters: 100
  initial_validation: false
  final_validation: true
  evaluate_example: first
optimizer:
  class_path: grokadamw.GrokAdamW
  init_args:
    lr: 0.0001
    weight_decay: 0.01
    betas:
    - 0.9
    - 0.999
```
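The optimizer arrives through the generic `class_path`/`init_args` mechanism, so it is resolved by import path rather than from a hard-coded list; here that path points at `GrokAdamW` from the `grokadamw` package. A hand-written sketch of what that resolution amounts to (illustrative only; the keyword arguments are exactly the `init_args` above):

```python
from importlib import import_module

import torch.nn as nn

# Resolve "grokadamw.GrokAdamW" the way a class_path/init_args config implies.
module_name, class_name = "grokadamw.GrokAdamW".rsplit(".", 1)
optimizer_cls = getattr(import_module(module_name), class_name)

model = nn.Linear(8, 8)  # stand-in for the GPT model
optimizer = optimizer_cls(
    model.parameters(),
    lr=1e-4,
    weight_decay=0.01,
    betas=(0.9, 0.999),
)
```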
Remaining top-level settings:

```yaml
devices: auto
num_nodes: 1
tokenizer_dir: ..
logger_name: wandb
seed: 23
```
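Saved to a file, a config in this shape is what `litgpt pretrain` consumes, e.g. `litgpt pretrain --config pretrain-core.yaml` (filename illustrative). `devices: auto` then uses the available local accelerators, and `resume: auto` picks up from an existing checkpoint in `out_dir` if one is present.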