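# CFlow anomaly-detection configuration (anomalib-style) for the MVTec "bottle" category.
# Sections: dataset, model, metrics, project, and PyTorch Lightning trainer arguments.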
dataset:
  name: mvtec # options: [mvtec, btech, folder]
  format: mvtec
  path: ./datasets/MVTec
  category: bottle
  task: segmentation
  image_size: 256
  train_batch_size: 16
  test_batch_size: 16
  inference_batch_size: 16
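  # CFlow-specific: number of feature vectors ("fibers") processed by the flow decoder
  # per optimization step; reducing it lowers GPU memory use.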
  fiber_batch_size: 64
  num_workers: 8
  transform_config:
    train: null
    val: null
  create_validation_set: false

model:
  name: cflow
  backbone: wide_resnet50_2
  layers:
    - layer2
    - layer3
    - layer4
  decoder: freia-cflow
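  # Conditional normalizing-flow decoder hyperparameters: condition_vector is the size of
  # the positional-encoding condition, coupling_blocks the number of coupling layers per
  # decoder, and clamp_alpha the clamping value used in the affine coupling layers.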
  condition_vector: 128
  coupling_blocks: 8
  clamp_alpha: 1.9
  soft_permutation: false
  lr: 0.0001
  early_stopping:
    patience: 2
    metric: pixel_AUROC
    mode: max
  normalization_method: min_max # options: [null, min_max, cdf]
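  # With adaptive: true, image and pixel thresholds are estimated from the data;
  # the *_default values below are used only when adaptive thresholding is disabled.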
  threshold:
    image_default: 0
    pixel_default: 0
    adaptive: true

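# Image-level metrics score whole-image anomaly classification; pixel-level metrics
# score the predicted segmentation masks.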
metrics:
  image:
    - F1Score
    - AUROC
  pixel:
    - F1Score
    - AUROC

project:
  seed: 0
  path: ./results
  log_images_to: [local]
  logger: false # options: [tensorboard, wandb, csv] or combinations.

# PL Trainer Args. Don't add extra parameters here.
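# These keys are forwarded to pytorch_lightning.Trainer; the flag set shown here
# corresponds to the Lightning 1.x Trainer API (gpus, tpu_cores, amp_backend, etc.).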
trainer:
  accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
  accumulate_grad_batches: 1
  amp_backend: native
  auto_lr_find: false
  auto_scale_batch_size: false
  auto_select_gpus: false
  benchmark: false
  check_val_every_n_epoch: 1
  default_root_dir: null
  detect_anomaly: false
  deterministic: false
  enable_checkpointing: true
  enable_model_summary: true
  enable_progress_bar: true
  fast_dev_run: false
  gpus: null # Set automatically
  gradient_clip_val: 0
  ipus: null
  limit_predict_batches: 1.0
  limit_test_batches: 1.0
  limit_train_batches: 1.0
  limit_val_batches: 1.0
  log_every_n_steps: 50
  log_gpu_memory: null
  max_epochs: 50
  max_steps: -1
  max_time: null
  min_epochs: null
  min_steps: null
  move_metrics_to_cpu: false
  multiple_trainloader_mode: max_size_cycle
  num_nodes: 1
  num_processes: 1
  num_sanity_val_steps: 0
  overfit_batches: 0.0
  plugins: null
  precision: 32
  profiler: null
  reload_dataloaders_every_n_epochs: 0
  replace_sampler_ddp: true
  strategy: null
  sync_batchnorm: false
  tpu_cores: null
  track_grad_norm: -1
  val_check_interval: 1.0