alvarobartt HF Staff committed on
Commit f94fdeb · 1 Parent(s): ecdeb30

Upload 2 files

Files changed (2):
  1. causallm-to-hub.py +51 -0
  2. dpo-qlora-4bit.py +83 -0
causallm-to-hub.py ADDED
@@ -0,0 +1,51 @@
# Usage: python causallm-to-hub.py --path <path> --hub-name <hub_name>

import argparse

import torch
from huggingface_hub import HfApi
from transformers import AutoModelForCausalLM, AutoTokenizer


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--path", type=str)
    parser.add_argument("--hub-name", type=str)
    return parser.parse_args()


def main():
    args = get_args()
    print(f"Args: {args}")

    print(f"Loading tokenizer from path: {args.path}")
    tokenizer = AutoTokenizer.from_pretrained(args.path)
    print(f"Pushing the tokenizer to the Hub at {args.hub_name}")
    tokenizer.push_to_hub(args.hub_name, private=True)

    print(f"Loading model from path: {args.path}")
    model = AutoModelForCausalLM.from_pretrained(
        args.path,
        return_dict=True,
        torch_dtype=torch.bfloat16,
        device_map="auto",
    )
    print(f"Pushing the model to the Hub at {args.hub_name}")
    model.push_to_hub(args.hub_name, private=True)

    # Also upload the evaluation summaries sitting next to the checkpoint,
    # skipping any file that is missing locally
    api = HfApi()
    for file in ["all_results.json", "eval_results.json"]:
        try:
            api.upload_file(
                path_or_fileobj=f"{args.path}/{file}",
                path_in_repo=file,
                repo_id=args.hub_name,
                repo_type="model",
            )
        except Exception as e:
            print(f"Failed to upload {file}: {e}")


if __name__ == "__main__":
    main()
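As a quick sanity check after running the script, the pushed repo can be loaded straight back from the Hub. A minimal sketch, assuming you are authenticated with a token that can read the private repo; "your-username/your-model" is a placeholder for whatever was passed as --hub-name:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder repo id: substitute the value passed as --hub-name above
repo_id = "your-username/your-model"
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16)

# Generate a few tokens to confirm the weights round-tripped correctly
inputs = tokenizer("Hello, world!", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=8)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))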
dpo-qlora-4bit.py ADDED
@@ -0,0 +1,83 @@
import torch
from datasets import load_dataset
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from trl import DPOConfig, DPOTrainer


if __name__ == "__main__":
    model_name = "..."
    dataset = load_dataset(...)

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    tokenizer.pad_token = tokenizer.eos_token

    # 4-bit NF4 quantization settings shared by the policy and reference models
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.bfloat16,
    )

    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        low_cpu_mem_usage=True,
        torch_dtype=torch.bfloat16,
        quantization_config=bnb_config,
        attn_implementation="flash_attention_2",
    )
    model.resize_token_embeddings(len(tokenizer))
    model.config.pad_token_id = tokenizer.pad_token_id
    model.config.use_cache = False  # required with gradient checkpointing

    # Frozen reference model for the DPO loss
    ref_model = AutoModelForCausalLM.from_pretrained(
        model_name,
        low_cpu_mem_usage=True,
        torch_dtype=torch.bfloat16,
        quantization_config=bnb_config,
        attn_implementation="flash_attention_2",
    ).eval()

    peft_config = LoraConfig(
        lora_alpha=128,
        lora_dropout=0.05,
        r=64,
        bias="none",
        task_type="CAUSAL_LM",
        target_modules=[
            "q_proj",
            "k_proj",
            "v_proj",
        ],
    )
    model = get_peft_model(model, peft_config)

    training_args = DPOConfig(
        num_train_epochs=3,
        learning_rate=5e-07,
        per_device_train_batch_size=1,
        do_eval=True,
        per_device_eval_batch_size=1,
        adam_epsilon=1e-08,
        lr_scheduler_type="linear",
        warmup_ratio=0.1,
        seed=42,
        logging_steps=100,
        save_steps=500,
        save_strategy="steps",
        output_dir="./output-dir",
        gradient_checkpointing=True,
        bf16=True,
        remove_unused_columns=False,
    )

    # beta, max_length and max_prompt_length are taken from the DPOConfig
    # defaults. The model is already wrapped with PEFT above and an explicit
    # reference model is passed, so peft_config must not be handed to the
    # trainer as well (DPOTrainer rejects ref_model together with peft_config).
    dpo_trainer = DPOTrainer(
        model,
        ref_model,
        args=training_args,
        train_dataset=dataset["train"],
        eval_dataset=dataset["test"],
        tokenizer=tokenizer,
    )
    dpo_trainer.train()
    dpo_trainer.save_model()
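The load_dataset(...) call is left elided in the commit. For context, TRL's DPOTrainer consumes a preference dataset with prompt, chosen, and rejected text columns. A minimal sketch of a compatible stand-in, with purely illustrative contents:

from datasets import Dataset, DatasetDict

# Hypothetical toy preference data in the prompt/chosen/rejected format
# expected by DPOTrainer; a real run would load a full dataset instead
pairs = {
    "prompt": ["What is the capital of France?"],
    "chosen": ["The capital of France is Paris."],
    "rejected": ["I don't know."],
}
dataset = DatasetDict({
    "train": Dataset.from_dict(pairs),
    "test": Dataset.from_dict(pairs),
})

After training, save_model() writes the LoRA adapter to ./output-dir; one way to reload it for inference is peft's AutoPeftModelForCausalLM.from_pretrained("./output-dir"), after which merge_and_unload() can fold the adapter into the base weights so the result can be pushed with a script like causallm-to-hub.py above.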