---
# ai-toolkit (ostris) training job: FLUX.1-dev LoRA for the subject "rami murad".
# Schema: job/config/process[] as consumed by ai-toolkit's sd_trainer process.
job: extension
config:
  name: "flux_lora_rami_v1"
  process:
    - type: 'sd_trainer'
      training_folder: "output_flux_lora_rami"
      # Token injected into prompts via the [trigger] placeholder below.
      trigger_word: "rami murad"
      # Quoted: plain scalars containing ':' are fragile across YAML parsers.
      device: "cuda:0"

      # LoRA adapter configuration (rank 16, alpha 16).
      network:
        type: "lora"
        linear: 16
        linear_alpha: 16

      # Checkpointing: save fp16 weights every 250 steps, keep the last 4.
      save:
        dtype: float16
        save_every: 250
        max_step_saves_to_keep: 4
        push_to_hub: false

      datasets:
        - folder_path: "ai-toolkit/images"
          caption_ext: "txt"
          # 5% of steps train without the caption (regularization).
          caption_dropout_rate: 0.05
          shuffle_tokens: false
          cache_latents_to_disk: true
          resolution: [1024]

      train:
        batch_size: 1
        bypass_guidance_embedding: true
        steps: 3000
        gradient_accumulation: 1
        train_unet: true
        train_text_encoder: false
        gradient_checkpointing: true
        noise_scheduler: "flowmatch"
        optimizer: "adamw8bit"
        # NOTE(review): YAML 1.1 parsers (PyYAML) read `1e-4` as the string
        # "1e-4", not a float; ai-toolkit coerces it, so this is kept as-is.
        lr: 1e-4
        # NOTE(review): ai-toolkit's FLUX examples recommend bf16 here; fp16
        # is reported to produce NaNs with FLUX.1 — confirm this is intended.
        dtype: fp16
        # NOTE(review): sampling is disabled here, yet a full `sample:` block
        # is configured below — harmless, but confirm which is intended.
        disable_sampling: true
        ema_config:
          use_ema: true
          ema_decay: 0.99

      # Base model: FLUX.1-dev, quantized to fit in limited VRAM.
      model:
        name_or_path: "black-forest-labs/FLUX.1-dev"
        is_flux: true
        load_in_8bit: true
        quantize: true
        quantize_kwargs:
          # Keep the time/text embedding module in full precision.
          exclude:
            - "*time_text_embed*"

      # Periodic preview renders ([trigger] expands to trigger_word).
      # Inactive while train.disable_sampling is true.
      sample:
        sampler: "flowmatch"
        sample_every: 250
        width: 1024
        height: 1024
        prompts:
          - "[trigger] smiling in front of a white background, headshot, studio lighting"
          - "[trigger] wearing a suit, standing in a futuristic city, cinematic lighting"
          - "[trigger] in a medieval outfit, standing in front of a castle"
          - "[trigger] sitting at a wooden desk, writing in a notebook"
          - "[trigger] relaxing at the beach during sunset, soft light"
          - "[trigger] on stage giving a TED talk, spotlight"
          - "[trigger] in a forest with sunbeams shining through the trees"
        neg: ""
        seed: 42
        walk_seed: true
        guidance_scale: 4
        sample_steps: 25

meta:
  name: "[name]"
  version: '1.0'