import os
import torch
import glob
import gc
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling,
    LlamaConfig,
    AutoConfig
)
from peft import LoraConfig, TaskType, get_peft_model, prepare_model_for_kbit_training
from datasets import Dataset
from huggingface_hub import snapshot_download
from tqdm import tqdm
import gradio as gr
import math
from accelerate import Accelerator
import subprocess
import sys
import json
import shutil
# --- Configuration ---
YOUR_HF_USERNAME = "Twelve2five"
MODEL_REPO_NAME = "llama-3-8b-rvq-resized"
DATASET_REPO_NAME = "podcast-dialogue-rvq-pairs-3items"

hf_model_repo_id = f"{YOUR_HF_USERNAME}/{MODEL_REPO_NAME}"
hf_dataset_repo_id = f"{YOUR_HF_USERNAME}/{DATASET_REPO_NAME}"

# Output directories
OUTPUT_TRAINING_DIR = "./llama3-8b-rvq-qlora-finetuned-run"
LOGGING_DIR = "./llama3-8b-rvq-qlora-logs-run"
local_download_path = "./downloaded_dataset_files"

# Training parameters
NUM_EPOCHS = 1
BATCH_SIZE_PER_DEVICE = 1
GRAD_ACCUMULATION_STEPS = 64
LEARNING_RATE = 1e-4
WEIGHT_DECAY = 0.01
WARMUP_RATIO = 0.03
LR_SCHEDULER = "cosine"
OPTIMIZER = "paged_adamw_8bit"
MAX_SEQ_LENGTH = 256
MICRO_BATCH_SIZE = 1
# Multi-GPU configuration
accelerator = Accelerator()

# Configure the CUDA allocator for multi-GPU use
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:32"
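# Note: max_split_size_mb caps the size of the allocator's cached blocks,
# trading some allocation speed for less fragmentation. 32 MB is an
# aggressive value chosen here for tight-memory fine-tuning, not a
# universal default.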
# Print GPU information
print(f"Available GPUs: {torch.cuda.device_count()}")
for i in range(torch.cuda.device_count()):
    print(f"GPU {i}: {torch.cuda.get_device_name(i)} with {torch.cuda.get_device_properties(i).total_memory / 1e9:.2f} GB")
def seq2seq_causal_collator(features):
    """
    Collator that concatenates context (input_ids) and target (labels)
    for causal-LM sequence-to-sequence training.
    Masks the loss for the context part of the sequence and pads
    sequences to the maximum length in the batch.
    """
    batch = {}
    concatenated_input_ids = []
    concatenated_labels = []
    max_len = 0

    # --- First pass: concatenate, create masked labels, find max length ---
    for feature in features:
        # The dataset transform should provide tensors here
        input_ids = feature['input_ids']
        labels = feature['labels']

        # Ensure tensors are 1D (handle potential extra dims)
        if input_ids.dim() > 1:
            input_ids = input_ids.squeeze()
        if labels.dim() > 1:
            labels = labels.squeeze()

        context_len = input_ids.shape[0]

        # Concatenate context and target for the model input
        combined_ids = torch.cat([input_ids, labels], dim=0)
        concatenated_input_ids.append(combined_ids)

        # Create labels: -100 for context positions, actual labels for target
        masked_labels = torch.cat([
            torch.full((context_len,), -100, dtype=torch.long, device=input_ids.device),
            labels
        ], dim=0)
        concatenated_labels.append(masked_labels)

        # Track max length for padding
        max_len = max(max_len, combined_ids.shape[0])

    # --- Second pass: pad to max length ---
    padded_input_ids = []
    padded_labels = []
    attention_masks = []
    input_pad_token_id = 0
    label_pad_token_id = -100

    for i in range(len(features)):
        ids = concatenated_input_ids[i]
        lbls = concatenated_labels[i]
        seq_len = ids.shape[0]
        padding_len = max_len - seq_len

        # Pad on the right side
        padded_input_ids.append(torch.nn.functional.pad(
            ids, (0, padding_len), value=input_pad_token_id
        ))
        padded_labels.append(torch.nn.functional.pad(
            lbls, (0, padding_len), value=label_pad_token_id
        ))
        # Build the attention mask (1 for real tokens, 0 for padding) from the
        # true sequence length rather than comparing against the pad id,
        # since 0 may also be a legitimate RVQ token id.
        attention_masks.append(torch.cat([
            torch.ones(seq_len, dtype=torch.long),
            torch.zeros(padding_len, dtype=torch.long)
        ]))

    # --- Stack and create the final batch ---
    batch['input_ids'] = torch.stack(padded_input_ids)
    batch['labels'] = torch.stack(padded_labels)
    batch['attention_mask'] = torch.stack(attention_masks)
    return batch
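# Worked example (hypothetical shapes): for two features with context/target
# lengths (3, 2) and (5, 1), both rows are padded to length 6; the first row's
# labels come out as [-100, -100, -100, t1, t2, -100] (context masked, one pad
# position), and attention_mask zeroes exactly the padded tail.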
def prepare_for_dataset(batch):
    output = {'input_ids': [], 'labels': []}
    for item in batch:
        output['input_ids'].append(item['input_ids'].cpu().tolist())
        output['labels'].append(item['labels'].cpu().tolist())
    return output
def load_model():
    print(f"Loading base model architecture from: {hf_model_repo_id}")

    # Pick the GPU with the most free memory
    gpu_id = 0  # Default to the first GPU
    max_free_memory = 0
    for i in range(torch.cuda.device_count()):
        free_memory = torch.cuda.get_device_properties(i).total_memory - torch.cuda.memory_allocated(i)
        if free_memory > max_free_memory:
            max_free_memory = free_memory
            gpu_id = i
    print(f"Loading model on GPU {gpu_id} with {max_free_memory / 1e9:.2f}GB free memory")

    # Configure quantization
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_use_double_quant=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.bfloat16
    )
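    # Rough memory sketch (an estimate, not a measured figure): NF4 stores
    # weights at ~0.5 byte per parameter, plus a small overhead for the
    # double-quantized scales, so an 8B-parameter base should occupy on the
    # order of 4-6 GB before activations, LoRA weights, and optimizer state.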
    # Load the model
    try:
        # Upgrade transformers first so the checkpoint's config loads cleanly
        subprocess.check_call([sys.executable, "-m", "pip", "install", "--upgrade", "transformers"])

        # Load the config explicitly to avoid auto-detection issues
        config = LlamaConfig.from_pretrained(
            hf_model_repo_id,
            trust_remote_code=True
        )

        # Then load the model with the explicit config
        model = AutoModelForCausalLM.from_pretrained(
            hf_model_repo_id,
            config=config,
            quantization_config=bnb_config,
            device_map="auto",
            trust_remote_code=True
        )
        print(f"Loaded model vocab size: {model.config.vocab_size}")
        print(f"Input embedding shape: {model.get_input_embeddings().weight.shape}")
    except Exception as e:
        print(f"Error loading model from Hub: {e}")
        # Fall back to loading without the explicit config
        try:
            print("Attempting alternative loading method...")
            model = AutoModelForCausalLM.from_pretrained(
                hf_model_repo_id,
                quantization_config=bnb_config,
                device_map="auto",
                trust_remote_code=True,
                torch_dtype=torch.bfloat16,
                revision="main",
                low_cpu_mem_usage=True,
            )
            print("Alternative loading successful!")
            print(f"Loaded model vocab size: {model.config.vocab_size}")
        except Exception as e2:
            # Both attempts failed; surface the error to the caller
            raise RuntimeError(f"Alternative loading also failed: {e2}") from e2
    # Load the official Meta tokenizer for Llama 3
    try:
        tokenizer = AutoTokenizer.from_pretrained(
            "meta-llama/Meta-Llama-3-8B",  # Official Meta tokenizer (gated repo)
            token=os.environ.get("HF_TOKEN", None)  # In case auth is needed
        )
    except Exception:
        # from_pretrained raises (rather than returning None) when the gated
        # repo is inaccessible, so fall back to another foundation tokenizer
        print("Falling back to another tokenizer as the Meta tokenizer requires an auth token")
        tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")

    print(f"Loaded tokenizer vocabulary size: {len(tokenizer)}")
    # Print information about input embeddings
    print(f"Input embedding shape: {model.get_input_embeddings().weight.shape}")
    # Prepare model for k-bit training
    model = prepare_model_for_kbit_training(model)

    # Define LoRA configuration
    lora_config = LoraConfig(
        r=16,
        lora_alpha=32,
        target_modules=[
            "q_proj",
            "k_proj",
            "v_proj",
            "o_proj",
            "gate_proj",
            "up_proj",
            "down_proj",
        ],
        lora_dropout=0.05,
        bias="none",
        task_type=TaskType.CAUSAL_LM
    )

    # Apply LoRA to the model
    model = get_peft_model(model, lora_config)
    model.print_trainable_parameters()
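    # Ballpark check (assuming standard Llama-3-8B dimensions): each adapted
    # module adds r * (d_in + d_out) LoRA parameters per layer, so r=16 over
    # all seven projections across 32 layers comes to roughly 42M trainable
    # parameters, about 0.5% of the 8B base; print_trainable_parameters()
    # should report a figure near that.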
    return model, tokenizer  # Return both model and tokenizer
def load_dataset():
    # --- Download the dataset repository files ---
    try:
        os.makedirs(local_download_path, exist_ok=True)
        downloaded_repo_root = snapshot_download(
            repo_id=hf_dataset_repo_id,
            repo_type="dataset",
            local_dir=local_download_path,
            local_dir_use_symlinks=False
        )
        print(f"Dataset repository content downloaded to: {downloaded_repo_root}")
    except Exception as e:
        print(f"Error downloading dataset: {e}")
        return None

    # --- Load .pt files into a Hugging Face Dataset object ---
    pairs_dir = os.path.join(downloaded_repo_root, "final_rvq_pairs")
    all_pair_files = glob.glob(os.path.join(pairs_dir, "*_rvq_pairs.pt"))
    if not all_pair_files:
        all_pair_files = glob.glob(os.path.join(downloaded_repo_root, "*_rvq_pairs.pt"))
    if not all_pair_files:
        print("No RVQ pair files found!")
        return None
    print(f"Found {len(all_pair_files)} RVQ pair files.")

    # Load data from .pt files into memory
    all_data_pairs = []
    for file_path in tqdm(all_pair_files, desc="Loading pair files"):
        try:
            episode_pairs = torch.load(file_path, map_location='cpu')
            all_data_pairs.extend(episode_pairs)
        except Exception as e:
            print(f"Warning: Could not load file {file_path}: {e}")
    if not all_data_pairs:
        return None
    print(f"Loaded {len(all_data_pairs)} training pairs.")

    # Convert to a Hugging Face Dataset in chunks to limit peak memory
    chunk_size = 1000
    processed_data = {'input_ids': [], 'labels': []}
    for i in tqdm(range(0, len(all_data_pairs), chunk_size), desc="Preparing data"):
        batch = all_data_pairs[i:i + chunk_size]
        prepared_batch = prepare_for_dataset(batch)
        processed_data['input_ids'].extend(prepared_batch['input_ids'])
        processed_data['labels'].extend(prepared_batch['labels'])
    hf_dataset = Dataset.from_dict(processed_data)

    # Transform to get tensors back at access time
    hf_dataset.set_transform(lambda batch: {
        'input_ids': [torch.tensor(ids, dtype=torch.long) for ids in batch['input_ids']],
        'labels': [torch.tensor(lbls, dtype=torch.long) for lbls in batch['labels']]
    })

    # Cleanup
    del all_data_pairs
    del processed_data
    gc.collect()
    return hf_dataset
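# Note: Dataset.set_transform applies the conversion lazily on __getitem__,
# so tensors are only materialized for the rows a DataLoader actually touches;
# the list round-trip above therefore costs little at training time.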
# Memory cleaning function
def clean_memory():
    gc.collect()
    if torch.cuda.is_available():
        for i in range(torch.cuda.device_count()):
            with torch.cuda.device(f'cuda:{i}'):
                torch.cuda.empty_cache()
                torch.cuda.reset_peak_memory_stats()
def train_model(
    hf_username,
    model_repo_name,
    dataset_repo_name,
    epochs=1,
    batch_size=1,
    grad_accum_steps=16,  # Increased from 8 to 16
    learning_rate=1e-4,
    progress=gr.Progress()
):
    progress(0, desc="Setting up environment...")
    log = []

    # Aggressive memory cleanup
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        # Reset peak memory stats
        torch.cuda.reset_peak_memory_stats()

    # Clean up any existing model files to save space
    if os.path.exists("./model_files"):
        try:
            shutil.rmtree("./model_files")
        except Exception as e:
            log.append(f"Warning: Could not remove existing model files: {e}")
    if os.path.exists("./downloaded_dataset_files"):
        try:
            shutil.rmtree("./downloaded_dataset_files")
        except Exception as e:
            log.append(f"Warning: Could not remove existing dataset files: {e}")

    # Log GPU info
    if torch.cuda.is_available():
        log.append(f"Available GPUs: {torch.cuda.device_count()}")
        for i in range(torch.cuda.device_count()):
            gpu_name = torch.cuda.get_device_name(i)
            gpu_memory = torch.cuda.get_device_properties(i).total_memory / (1024**3)
            log.append(f"GPU {i}: {gpu_name} with {gpu_memory:.2f} GB")

    # Import required libraries. (torch is deliberately not re-imported here:
    # a function-local "import torch" would make the name local for the whole
    # function body and break the torch.cuda calls above with an
    # UnboundLocalError.)
    try:
        from datasets import Dataset
        from huggingface_hub import snapshot_download
        import transformers
        from transformers import AutoModelForCausalLM, LlamaConfig, LlamaForCausalLM
        from transformers import BitsAndBytesConfig, TrainingArguments, Trainer, AutoTokenizer
        from peft import LoraConfig, TaskType, get_peft_model, prepare_model_for_kbit_training
        log.append(f"Transformers version: {transformers.__version__}")
        log.append(f"PyTorch version: {torch.__version__}")
    except ImportError as e:
        log.append(f"Error importing libraries: {e}")
        return "\n".join(log)
    # --- Configuration ---
    progress(0.05, desc="Setting up configuration...")
    hf_model_repo_id = f"{hf_username}/{model_repo_name}"
    hf_dataset_repo_id = f"{hf_username}/{dataset_repo_name}"
    log.append(f"Model repo: {hf_model_repo_id}")
    log.append(f"Dataset repo: {hf_dataset_repo_id}")

    # Check if running on multiple GPUs
    n_gpus = torch.cuda.device_count()
    log.append(f"Number of GPUs available: {n_gpus}")

    # --- Load Base Model (with extreme quantization) ---
    progress(0.1, desc="Loading base model...")
    local_model_path = "./model_files"
    try:
        # Download the model files
        snapshot_download(
            repo_id=hf_model_repo_id,
            local_dir=local_model_path,
            local_dir_use_symlinks=False
        )
        log.append(f"Model files downloaded to {local_model_path}")

        # Ensure model_type is set correctly in the config
        config_path = os.path.join(local_model_path, "config.json")
        with open(config_path, "r") as f:
            config_data = json.load(f)
        model_type = config_data.get("model_type", "")
        log.append(f"Model architecture type: {model_type}")

        # Force model_type to "llama" if it is not already
        if model_type != "llama":
            config_data["model_type"] = "llama"
            # Also ensure architectures is set correctly
            config_data["architectures"] = ["LlamaForCausalLM"]
            with open(config_path, "w") as f:
                json.dump(config_data, f, indent=2)
            log.append("Updated config.json to use llama model_type")

        # Load the config first
        config = LlamaConfig.from_pretrained(local_model_path)
        log.append(f"Successfully loaded config: {config.model_type}")

        # Use 4-bit quantization for extreme memory savings
        bnb_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.bfloat16
        )

        # Load tokenizer first (needed for dataset preparation)
        tokenizer = AutoTokenizer.from_pretrained(local_model_path)

        # Per-device memory caps so accelerate can offload layers to CPU
        max_memory = {0: "40GB", "cpu": "64GB"}

        # Load the model with extreme memory optimization
        model = LlamaForCausalLM.from_pretrained(
            local_model_path,
            config=config,
            quantization_config=bnb_config,
            device_map="auto",
            max_memory=max_memory,
            torch_dtype=torch.bfloat16,
            low_cpu_mem_usage=True
        )
        log.append(f"Loaded model vocab size: {model.config.vocab_size}")
        log.append(f"Input embedding shape: {model.get_input_embeddings().weight.shape}")
    except Exception as e:
        log.append(f"Error loading model: {str(e)}")
        return "\n".join(log)
    # --- Prepare for K-bit Training & Apply LoRA ---
    progress(0.15, desc="Preparing model for fine-tuning...")
    try:
        model = prepare_model_for_kbit_training(model)
        log.append("Model prepared for k-bit training")

        # Use a minimal LoRA configuration with fewer parameters
        lora_config = LoraConfig(
            task_type=TaskType.CAUSAL_LM,
            r=8,  # Reduced from 16 to 8
            lora_alpha=16,  # Reduced from 32 to 16
            lora_dropout=0.05,
            bias="none",
            # Target only the attention query/value projections to reduce memory usage
            target_modules=["q_proj", "v_proj"]
        )

        # Apply LoRA
        peft_model = get_peft_model(model, lora_config)
        model_to_train = peft_model
        log.append("LoRA applied to model")

        # Free memory
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
    except Exception as e:
        log.append(f"Error preparing model for training: {str(e)}")
        return "\n".join(log)
    # --- Download and Process Dataset ---
    progress(0.2, desc="Loading dataset...")
    try:
        # Download the dataset files
        dataset_dir = os.path.join(os.getcwd(), "downloaded_dataset_files")
        snapshot_download(
            repo_id=hf_dataset_repo_id,
            repo_type="dataset",
            local_dir=dataset_dir,
            local_dir_use_symlinks=False
        )
        log.append(f"Dataset repository content downloaded to: {dataset_dir}")

        # Find all RVQ pair files
        rvq_pair_files = glob.glob(os.path.join(dataset_dir, "*_rvq_pairs.pt"))
        log.append(f"Found {len(rvq_pair_files)} RVQ pair files.")

        # Load training pairs from the dataset; for memory conservation,
        # cap the number of files loaded at 12 for now
        training_pairs = []
        max_file_count = min(12, len(rvq_pair_files))
        for i, pair_file in enumerate(rvq_pair_files[:max_file_count]):
            try:
                pairs = torch.load(pair_file)
                training_pairs.extend(pairs)
            except Exception as e:
                log.append(f"Warning: Could not load {pair_file}: {e}")
        log.append(f"Loaded a total of {len(training_pairs)} training pairs into memory.")

        # Prepare dataset
        dataset = Dataset.from_dict({
            "input_ids": [pair[0].tolist() for pair in training_pairs],
            "labels": [pair[1].tolist() for pair in training_pairs]
        })

        # Clear training_pairs to free memory
        training_pairs = None
        gc.collect()
        torch.cuda.empty_cache()

        # Use a smaller max_length to reduce memory pressure
        max_length = 512  # Reduced max sequence length
        # Create a data collator that truncates and right-pads each batch
        def data_collator(examples):
            # Convert lists back to tensors
            for i in range(len(examples)):
                examples[i]["input_ids"] = torch.tensor(examples[i]["input_ids"], dtype=torch.long)
                examples[i]["labels"] = torch.tensor(examples[i]["labels"], dtype=torch.long)

            # Longest sequence in this batch, capped at max_length
            batch_max_length = min(
                max(len(example["input_ids"]) for example in examples),
                max_length
            )

            batch = {
                "input_ids": [],
                "attention_mask": [],
                "labels": []
            }

            # Prepare sequences
            for example in examples:
                input_ids = example["input_ids"][:batch_max_length]
                labels = example["labels"][:batch_max_length]

                # Pad sequences on the right
                padding_length = batch_max_length - len(input_ids)
                attention_mask = torch.ones_like(input_ids)
                if padding_length > 0:
                    # Llama tokenizers often lack a pad token; fall back to 0
                    pad_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else 0
                    input_ids = torch.cat([
                        input_ids,
                        torch.full((padding_length,), pad_id, dtype=input_ids.dtype)
                    ])
                    # Pad labels with -100 so padded positions are ignored in loss computation
                    labels = torch.cat([
                        labels,
                        torch.full((padding_length,), -100, dtype=labels.dtype)
                    ])
                    attention_mask = torch.cat([
                        attention_mask,
                        torch.zeros(padding_length, dtype=attention_mask.dtype)
                    ])

                batch["input_ids"].append(input_ids)
                batch["attention_mask"].append(attention_mask)
                batch["labels"].append(labels)

            # Convert lists to tensors
            for key in batch:
                batch[key] = torch.stack(batch[key])
            return batch
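        # Why -100: Hugging Face causal-LM models compute cross-entropy with
        # ignore_index=-100, so label positions set to -100 (padding here,
        # context in seq2seq_causal_collator above) contribute nothing to the
        # loss or its gradients.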
        # Use the prepared dataset for training
        train_dataset = dataset

        # Drop the intermediate name (train_dataset keeps the reference alive)
        del dataset
        gc.collect()
        torch.cuda.empty_cache()
    except Exception as e:
        log.append(f"Error loading dataset: {str(e)}")
        return "\n".join(log)
    # --- Training Arguments ---
    progress(0.3, desc="Setting up training arguments...")
    output_dir = f"./results_{model_repo_name}"
    os.makedirs(output_dir, exist_ok=True)

    # Super-aggressive memory conservation
    training_args = TrainingArguments(
        output_dir=output_dir,
        num_train_epochs=float(epochs),
        per_device_train_batch_size=batch_size,
        gradient_accumulation_steps=grad_accum_steps,
        learning_rate=learning_rate,
        weight_decay=0.01,
        logging_dir=f"{output_dir}/logs",
        logging_steps=1,  # Log frequently to see progress
        save_steps=25,  # Save checkpoints frequently
        save_total_limit=1,  # Keep only one checkpoint to save space
        remove_unused_columns=False,
        push_to_hub=False,
        disable_tqdm=False,
        warmup_ratio=0.03,
        lr_scheduler_type="cosine",
        report_to="tensorboard",
        bf16=True,
        fp16=False,
        # Memory optimization
        gradient_checkpointing=True,
        gradient_checkpointing_kwargs={'use_reentrant': False},
        max_grad_norm=0.3,  # Reduced from the default 1.0
        dataloader_pin_memory=False,  # Reduce memory pressure
        # Optimizer settings for memory efficiency
        optim="adamw_torch",
        adam_beta1=0.9,
        adam_beta2=0.999,
        adam_epsilon=1e-8,
        # Evaluation settings
        do_eval=False,
        evaluation_strategy="no",
        # Process data in smaller chunks
        dataloader_num_workers=1,
        # Drop the last incomplete batch for memory-stable steps
        dataloader_drop_last=True,
    )
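    # The effective batch size per optimizer step is
    # per_device_train_batch_size * gradient_accumulation_steps (times the
    # number of GPUs), so the UI defaults of 1 and 16 give 16 sequences per
    # weight update on a single GPU.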
    # --- Initialize Trainer ---
    progress(0.4, desc="Initializing trainer...")

    # Trainer subclass that clears CUDA caches around each step.
    # Note: manually moving param.data to CPU in create_optimizer would
    # detach the weights from the GPU model and break training; offloading
    # optimizer state is better done via a paged optimizer such as
    # optim="paged_adamw_8bit".
    class MemoryEfficientTrainer(Trainer):
        def training_step(self, *args, **kwargs):
            # Memory cleanup before each training step
            gc.collect()
            torch.cuda.empty_cache()
            return super().training_step(*args, **kwargs)
    trainer = MemoryEfficientTrainer(
        model=model_to_train,
        args=training_args,
        train_dataset=train_dataset,
        data_collator=data_collator,
    )
    log.append("Trainer initialized with memory-efficient settings")
    # --- Start Training ---
    try:
        # Final memory cleanup before training
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

        progress(0.5, desc="Starting training...")
        log.append("Starting training with extreme memory optimization...")

        # Estimate the total number of optimizer steps for logging
        total_steps = len(train_dataset) // (batch_size * grad_accum_steps)
        log.append(f"Total training steps: {total_steps}")

        # Train the model
        train_result = trainer.train()

        progress(0.95, desc="Saving model...")
        # Save the final model (adapter weights) and training state
        final_save_path = os.path.join(training_args.output_dir, "final_checkpoint")
        log.append(f"Saving final model checkpoint to {final_save_path}...")
        trainer.save_model(final_save_path)
        trainer.save_state()

        # Log metrics
        metrics = train_result.metrics
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        for key, value in metrics.items():
            log.append(f"{key}: {value}")

        # Report peak memory usage
        if torch.cuda.is_available():
            peak_memory = torch.cuda.max_memory_allocated() / (1024**3)
            log.append(f"Peak GPU memory usage: {peak_memory:.2f} GB")
    except Exception as e:
        log.append(f"An error occurred during training: {str(e)}")
        # Try to save a checkpoint even if training failed
        try:
            log.append("Attempting to save partial checkpoint...")
            emergency_save_path = os.path.join(training_args.output_dir, "emergency_checkpoint")
            trainer.save_model(emergency_save_path)
            log.append(f"Saved emergency checkpoint to {emergency_save_path}")
        except Exception as save_error:
            log.append(f"Could not save emergency checkpoint: {save_error}")
        return "\n".join(log)

    progress(1.0, desc="Training complete!")
    log.append("Training process completed successfully.")
    return "\n".join(log)
# Define the Gradio interface
def create_interface():
    with gr.Blocks(title="Llama 3 8B RVQ Fine-tuning") as demo:
        gr.Markdown("# Llama 3 8B RVQ LoRA Fine-tuning")
        gr.Markdown("Fine-tune a Llama 3 8B model with RVQ token embeddings using LoRA with extreme memory optimization")

        with gr.Row():
            with gr.Column():
                hf_username = gr.Textbox(label="HuggingFace Username", value="Twelve2five")
                model_repo = gr.Textbox(label="Model Repository Name", value="llama-3-8b-rvq-resized")
                dataset_repo = gr.Textbox(label="Dataset Repository Name", value="podcast-dialogue-rvq-pairs-3items")
            with gr.Column():
                epochs = gr.Number(label="Number of Epochs", value=1, minimum=1, maximum=10)
                batch_size = gr.Number(label="Batch Size per Device", value=1, minimum=1, maximum=8)
                grad_accum = gr.Number(label="Gradient Accumulation Steps", value=16, minimum=8, maximum=32)
                lr = gr.Number(label="Learning Rate", value=1e-4)

        start_btn = gr.Button("Start Training")
        output = gr.Textbox(label="Training Log", lines=20)

        start_btn.click(
            fn=train_model,
            inputs=[hf_username, model_repo, dataset_repo, epochs, batch_size, grad_accum, lr],
            outputs=output
        )
    return demo
# Create and launch the interface
demo = create_interface()
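# Note: for long-running jobs like this, calling demo.queue() before launch()
# lets Gradio process the training request through its queue instead of a
# single HTTP request, which avoids browser timeouts on multi-hour runs.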
if __name__ == "__main__":
    demo.launch()