feat(sweep): update config
tools/train/sweep.yaml   +34 -23
@@ -1,6 +1,6 @@
-program:
-entity:
-project:
+program: train.py
+entity: dalle-mini
+project: dalle-mini
 method: random
 metric:
   name: eval/loss
@@ -8,36 +8,47 @@ metric:
 parameters:
   learning_rate:
     distribution: log_uniform
-    # from exp(min) to exp(max)
-    min: -
-    max: -5
+    # from exp(min) to exp(max)
+    min: -6.9
+    max: -3.5
   gradient_accumulation_steps:
     value: 8
   warmup_steps:
-
-
+    value: 4000
+#TODO: outdated command
 command:
   - python3
   - ${program}
-  - "--
-  - "/
-  - "--
-  - "/
-  - "--
-  - "
-  - "--
-  - "--
-  - "--
-  -
-  - "--
-  -
+  - "--tokenizer_name"
+  - "boris/dalle-mini-tokenizer"
+  - "--config_name"
+  - "facebook/bart-large-cnn"
+  - "--dataset_repo_or_path"
+  - "boris/gis_vqgan_f16_16384"
+  - "--streaming"
+  - "--use_auth_token"
+  - "--image_vocab_size"
+  - 16384
+  - "--image_length"
+  - 256
+  - "--normalize_text"
+  - True
   - "--per_device_train_batch_size"
   - 56
   - "--per_device_eval_batch_size"
   - 56
-  - "--
-  - 80
-  - "--no_decay"
+  - "--adafactor"
   - "--do_train"
   - "--do_eval"
+  - "--num_train_epochs"
+  - 1
+  - "--logging_steps"
+  - 40
+  - "--eval_steps"
+  - 800
+  - "--output_dir"
+  - "./output"
+  - "--overwrite_output_dir"
+  - "--max_train_samples"
+  - 10000000
   - ${args}
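For reference, wandb's log_uniform distribution treats min/max as natural-log exponents, so the new bounds sweep the learning rate roughly between exp(-6.9) ≈ 1e-3 and exp(-3.5) ≈ 3e-2. The short Python sketch below is not part of the commit; it assumes the wandb SDK and PyYAML are installed and that the file sits at tools/train/sweep.yaml. It loads the config, prints the implied learning-rate range, and registers the sweep programmatically as an alternative to running `wandb sweep tools/train/sweep.yaml` on the command line.

# Illustrative sketch only, not from the commit: register the sweep via the wandb SDK.
import math

import wandb
import yaml

# Load the config exactly as committed above.
with open("tools/train/sweep.yaml") as f:
    sweep_config = yaml.safe_load(f)

# With wandb's log_uniform distribution, min/max are natural-log exponents,
# so learning_rate is sampled between exp(min) and exp(max).
lr = sweep_config["parameters"]["learning_rate"]
print(f"learning_rate range: {math.exp(lr['min']):.1e} .. {math.exp(lr['max']):.1e}")
# -> roughly 1.0e-03 .. 3.0e-02

# Create the sweep; agents started with `wandb agent dalle-mini/dalle-mini/<sweep_id>`
# then execute the `command` section, with swept parameters substituted via ${args}.
sweep_id = wandb.sweep(sweep_config, entity="dalle-mini", project="dalle-mini")
print(sweep_id)

Note the "#TODO: outdated command" comment in the config: the command block may still need updating before agents are launched against this sweep.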