import os
from unsloth import FastLanguageModel
from transformers import TrainingArguments, Trainer, DataCollatorForLanguageModeling
from datasets import load_dataset
import torch
# Validate environment variable
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise ValueError("HF_TOKEN environment variable not set")
# Load quantized model
try:
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name="deepseek-ai/DeepSeek-V3",
        dtype=torch.bfloat16,
        load_in_4bit=True,
        token=HF_TOKEN,
    )
    FastLanguageModel.for_training(model)
except Exception as e:
    raise RuntimeError(f"Failed to load model: {str(e)}")
# Load and prepare dataset (example - replace with your actual dataset)
try:
    dataset = load_dataset("imdb")  # Example dataset
    tokenized_dataset = dataset.map(
        # max_length=2048 is an illustrative cap: without it, padding="max_length"
        # pads every example to tokenizer.model_max_length, which is far larger
        # than IMDB needs and would blow up memory.
        lambda x: tokenizer(x["text"], truncation=True, padding="max_length", max_length=2048),
        batched=True,
    )
except Exception as e:
    raise RuntimeError(f"Failed to load/prepare dataset: {str(e)}")
# Training arguments
training_args = TrainingArguments(
    output_dir="/app/checkpoints",
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    num_train_epochs=2,
    learning_rate=2e-5,
    save_steps=500,
    save_total_limit=2,
    evaluation_strategy="steps",
    eval_steps=500,
    logging_dir="/app/logs",
    logging_steps=100,
    fp16=False,
    bf16=True,
    deepspeed="/app/ds_config.json",
)
# Initialize trainer
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset["train"],
    eval_dataset=tokenized_dataset["test"],
    # Causal-LM collator derives labels from input_ids; without labels the
    # Trainer has no loss to optimize.
    data_collator=DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False),
)
# Train
try:
    trainer.train()
except Exception as e:
    raise RuntimeError(f"Training failed: {str(e)}")
# Save model
model.save_pretrained("/app/fine_tuned_model")
tokenizer.save_pretrained("/app/fine_tuned_model")
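# Optional: publish the result to the Hub, reusing the HF_TOKEN validated
# above. The repo id is a placeholder assumption; replace it before use.
# model.push_to_hub("your-username/deepseek-v3-finetune", token=HF_TOKEN)
# tokenizer.push_to_hub("your-username/deepseek-v3-finetune", token=HF_TOKEN)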