import os

import torch
# Import unsloth before transformers so its performance patches are applied
from unsloth import FastLanguageModel
from transformers import DataCollatorForLanguageModeling, Trainer, TrainingArguments
from datasets import load_dataset
# Validate environment variable
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise ValueError("HF_TOKEN environment variable not set")
# Load quantized model
try:
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name="deepseek-ai/DeepSeek-V3",
        dtype=torch.bfloat16,
        load_in_4bit=True,
        token=HF_TOKEN,
    )
    FastLanguageModel.for_training(model)
except Exception as e:
    raise RuntimeError(f"Failed to load model: {e}") from e
# Load and prepare dataset (example - replace with your actual dataset)
try:
    dataset = load_dataset("imdb")  # Example dataset
    tokenized_dataset = dataset.map(
        lambda x: tokenizer(
            x["text"],
            truncation=True,
            padding="max_length",
            max_length=2048,  # illustrative cap; without it, padding expands every
                              # example to the model's full context window
        ),
        batched=True,
    )
except Exception as e:
    raise RuntimeError(f"Failed to load/prepare dataset: {e}") from e
# Training arguments
training_args = TrainingArguments(
    output_dir="/app/checkpoints",
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    num_train_epochs=2,
    learning_rate=2e-5,
    save_steps=500,
    save_total_limit=2,
    evaluation_strategy="steps",  # renamed to eval_strategy in newer transformers releases
    eval_steps=500,
    logging_dir="/app/logs",
    logging_steps=100,
    fp16=False,
    bf16=True,
    deepspeed="/app/ds_config.json",
)
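# The DeepSpeed config at /app/ds_config.json must agree with the arguments above
# (bf16 enabled, batch sizes compatible). A minimal sketch of such a config --
# illustrative only, not the author's actual file:
#   {
#     "bf16": {"enabled": true},
#     "zero_optimization": {"stage": 3},
#     "train_micro_batch_size_per_gpu": "auto",
#     "gradient_accumulation_steps": "auto"
#   }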
# Initialize trainer
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset["train"],
    eval_dataset=tokenized_dataset["test"],
    data_collator=data_collator,  # supplies labels for the causal-LM loss
)
# Train
try:
    trainer.train()
except Exception as e:
    raise RuntimeError(f"Training failed: {e}") from e
# Save model
model.save_pretrained("/app/fine_tuned_model")
tokenizer.save_pretrained("/app/fine_tuned_model")
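# Optionally publish the result to the Hub (hypothetical repo id -- replace with your own):
# model.push_to_hub("your-username/deepseek-v3-finetuned", token=HF_TOKEN)
# tokenizer.push_to_hub("your-username/deepseek-v3-finetuned", token=HF_TOKEN)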