# finetune_lora_sdxl_pokemon_1x8gb_example.yaml
# Training mode: Finetuning with LoRA
# Base model: SDXL 1.0
# Dataset: Pokemon
# GPU: 1 x 8GB
# Notes:
# This config file has been optimized for 2 primary goals:
# - Minimize VRAM usage so that an SDXL model can be trained with only 8GB of VRAM.
# - Achieve reasonable results *quickly* (<15mins) for demo purposes.
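#
# Usage (a hedged sketch: the exact entry point and flag names vary across
# invoke-training versions, so treat the command below as an assumption and
# check the repo README for the current invocation):
#
#   invoke-train --cfg-file finetune_lora_sdxl_pokemon_1x8gb_example.yaml
#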
output:
  base_output_dir: output/
optimizer:
  learning_rate: 1.0
  optimizer:
    optimizer_type: Prodigy
    weight_decay: 0.01
    use_bias_correction: True
    safeguard_warmup: True
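# Note: learning_rate: 1.0 is intentional. Prodigy estimates the step size
# adaptively (D-adaptation style), so the learning rate acts as a multiplier
# on the adapted step size rather than an absolute rate; 1.0 is the
# recommended default for Prodigy.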
dataset:
  dataset_name: lambdalabs/pokemon-blip-captions
  image_transforms:
    resolution: 512
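# Note: SDXL's native training resolution is 1024. Training at 512 here
# trades some image fidelity for a large reduction in VRAM usage and step
# time, in line with the 8GB / <15min goals stated above.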
# General
model: stabilityai/stable-diffusion-xl-base-1.0
vae_model: madebyollin/sdxl-vae-fp16-fix
train_text_encoder: False
cache_text_encoder_outputs: True
enable_cpu_offload_during_validation: True
seed: 1
gradient_accumulation_steps: 4
mixed_precision: fp16
xformers: True
gradient_checkpointing: True
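# Together, the settings above are what make 8GB feasible: fp16 mixed
# precision, gradient checkpointing, xformers memory-efficient attention,
# frozen text encoders with cached outputs, and offloading model weights to
# CPU during validation.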
# Dataset size is 833. With train_batch_size 1 and gradient_accumulation_steps 4,
# the effective batch size is 4, so one epoch is ceil(833 / 4) = 209 steps.
# Set max_train_steps to train for 3 epochs: 209 * 3 = 627.
max_train_steps: 627
save_every_n_epochs: 1
save_every_n_steps: null
max_checkpoints: 100
validation_prompts:
  - A cute yoda pokemon creature.
  - A cute astronaut pokemon creature.
validate_every_n_epochs: 1
train_batch_size: 1
num_validation_images_per_prompt: 3
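# Checkpoints are saved once per epoch (save_every_n_epochs: 1) under
# base_output_dir; with only 3 epochs, the max_checkpoints: 100 cap is never
# reached. Validation runs after every epoch (validate_every_n_epochs: 1),
# generating 3 images for each of the 2 prompts above.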