Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- docker/intel_code/llama13b/Model-References/PyTorch/examples/bucketing/brute_force_min_pad_waste.py +154 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/bucketing/bucket.py +169 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/bucketing/bucket_analysis.svg +1709 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/bucketing/bucket_analysis_bar_squad.svg +1772 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/bucketing/datasets_library.py +123 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/bucketing/requirements.txt +7 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/bucketing/run_demo_bucketing_gaussian.py +14 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/bucketing/run_demo_squad.py +9 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/bucketing/squad.svg +1473 -0
- docker/intel_code/llama13b/Model-References/PyTorch/examples/bucketing/test.py +88 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/dev_mn.yaml +128 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/dev_mn_dummy.yaml +109 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/inpainting/v1-edgeinpainting.yaml +157 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/inpainting/v1-finetune-for-inpainting-laion-aesthetic-larger-masks-and-ucfg.yaml +156 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/inpainting/v1-finetune-for-inpainting-laion-aesthetic-larger-masks.yaml +149 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/inpainting/v1-finetune-for-inpainting-laion-iaesthe.yaml +144 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-1p4B-multinode-clip-encoder-high-res-512.yaml +135 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-1p4B-multinode-clip-encoder.yaml +131 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-1p4B-multinode.yaml +128 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-clip-encoder-dev.yaml +127 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-ldm-unfrozen-dev.yaml +129 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-ldm-vae-f8.yaml +130 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-multinode-clip-encoder-f16-1024-laion-hr.yaml +133 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-multinode-clip-encoder-f16-256-pretraining.yaml +127 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-multinode-clip-encoder-f16-768-laion-hr-inference.yaml +65 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-multinode-clip-encoder-f16-768-laion-hr.yaml +133 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-multinode-clip-encoder-f16-768.yaml +130 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-t5-encoder-dev.yaml +128 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-upscale-clip-encoder-f16-1024.yaml +170 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-v2-clip-encoder-improved_aesthetics-256.yaml +137 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-v2-clip-encoder-improved_aesthetics-512.yaml +135 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/upscaling/upscale-v1-with-f16.yaml +214 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/v1-inference.yaml +69 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/v1_improvedaesthetics.yaml +135 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/v1_laionhr.yaml +135 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/v2_laionhr1024.yaml +132 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/v2_laionhr1024_2.yaml +132 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/v2_pretraining.yaml +131 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/v3_pretraining.yaml +137 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/ldm/data/__init__.py +0 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/ldm/data/base.py +125 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/ldm/data/coco.py +253 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/ldm/data/dummy.py +34 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/ldm/data/imagenet.py +394 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/ldm/data/inpainting/__init__.py +0 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/ldm/data/inpainting/synthetic_mask.py +166 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/ldm/data/laion.py +516 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/ldm/data/lsun.py +92 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/ldm/lr_scheduler.py +98 -0
- docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/ldm/models/autoencoder.py +449 -0
docker/intel_code/llama13b/Model-References/PyTorch/examples/bucketing/brute_force_min_pad_waste.py
ADDED
@@ -0,0 +1,154 @@
# Copyright (C) 2023 Habana Labs, Ltd. an Intel Company

import numpy as np
import copy
import collections
from itertools import combinations
import time

class BruteForceOptimalBucketing():
    def __init__(self, inp_dist, num_buckets, numbucket_threshold=10000000, verbose=False, collect=1000, print_freq=10000):
        self.inp_dist = collections.OrderedDict(sorted(inp_dist.items()))
        # not sure if deepcopy preserves order, hence resorting
        self.inp_dist_orig = collections.OrderedDict(sorted(copy.deepcopy(self.inp_dist).items()))
        self.num_buckets = num_buckets
        self.numbucket_threshold = numbucket_threshold
        if numbucket_threshold > 0:
            self.simplify_distribution()
        self.verbose = verbose
        if self.verbose:
            print('Original distribution: ', self.inp_dist_orig)
            print('Modified distribution: ', self.inp_dist)
            print('kl divergence: ', self.kl_div())
        self.max_shape = max(self.inp_dist)
        key_col = []
        val_col = []
        for k in sorted(self.inp_dist):
            key_col += [k]
            val_col += [self.inp_dist[k]]
        self.num_shapes = len(key_col)
        self.key_col_tensor = np.array(key_col) # sorted by keys (first column)
        self.val_col_tensor = np.array(val_col)
        self.collect = collect
        self.print_freq = print_freq
        #self.key_col_tensor_tiled = np.tile(self.key_col_tensor, (num_buckets,self.collect,1)).T

    def kl_div(self):
        total = sum(self.inp_dist.values())
        kld = 0
        for k in self.inp_dist_orig:
            q = self.inp_dist_orig[k] / total
            tmp = self.inp_dist.get(k,0)
            if tmp == 0:
                term = 0
            else:
                term = tmp * np.log((tmp / total) / q)
            kld += term
        return kld

    def simplify_distribution(self):
        while self.num_possible_buckets() > self.numbucket_threshold:
            self.fuse_inp_dist()

    def fuse_inp_dist(self):
        # helper finds the smallest frequency (which will be removed)
        def helper(d):
            least_count = None
            for idx, k in enumerate(d):
                if least_count is None or d[k] < least_count:
                    least_count = d[k]
                    least_idx = idx
                    to_be_removed = k
            return least_count, least_idx, to_be_removed
        sum_vals_before = sum(self.inp_dist.values())
        assert sum_vals_before == sum(self.inp_dist_orig.values())
        # Remove the last (largest) shape from the search of lowest frequency to be deleted,
        # because that can't be deleted
        tmp = collections.OrderedDict(sorted(copy.deepcopy(self.inp_dist).items()))
        tmp.pop(max(tmp))
        # search for the shape with least frequency
        least_count, least_idx, to_be_removed = helper(tmp)
        # fuse the shape with least frequency with its right neighbour (next bigger shape)
        fuse_with = least_idx+1
        for idx, k in enumerate(self.inp_dist):
            if fuse_with == idx:
                self.inp_dist[k] = self.inp_dist[k]+least_count
        # Remove the shape with least frequency
        self.inp_dist.pop(to_be_removed)
        sum_vals_after = sum(self.inp_dist.values())
        assert sum_vals_before == sum_vals_after

    def num_possible_buckets(self):
        from functools import reduce
        import operator as op
        n = len(self.inp_dist)-1
        r = self.num_buckets-1
        r = min(r, n-r)
        numer = reduce(op.mul, range(n, n-r, -1), 1)
        denom = reduce(op.mul, range(1, r+1), 1)
        return numer // denom  # or / in Python 2

    # function to evaluate
    def num_padding(self, buckets):
        tot_pad = 0
        cur_bucket_idx = 0
        for k in self.inp_dist_orig: # self.inp_dist is expected to be sorted, hence we can do the cur_bucket_idx optimization
            while True:
                bucket = buckets[cur_bucket_idx]
                if k > bucket:
                    cur_bucket_idx += 1
                else:
                    break
            padding = (bucket - k) * self.inp_dist_orig[k]
            assert padding >= 0
            tot_pad += padding
        return tot_pad

    def find_optimal_buckets(self):
        result_best = None
        sizes = [k for k in self.inp_dist.keys()]
        sizes_without_largest = sizes[:-1]
        num = self.num_possible_buckets()
        if self.verbose:
            print(f'Combinations to try: {num}')
        t0 = time.time()
        collect_ctr = 0
        self.idx_collection = []
        self.bucket_boundary_collection = []
        result_best = None
        def update_helper(idx, result_best, best_padwaste_in_curr_collection, best_bucket_in_curr_collection):
            if result_best is None or result_best['wasted_padding'] > best_padwaste_in_curr_collection:
                tmp = {'wasted_padding':best_padwaste_in_curr_collection, 'idx':idx, 'buckets':copy.deepcopy(best_bucket_in_curr_collection)}
                if self.verbose:
                    print('Best till now: ', tmp)
                return tmp
            else:
                return result_best
        for idx, bucket_boundary in (enumerate(combinations(sizes_without_largest, self.num_buckets-1))):
            if collect_ctr == self.collect:
                best_padwaste_in_curr_collection, best_bucket_in_curr_collection, best_idx = self.process_collection()
                result_best = update_helper(idx - self.collect + best_idx, result_best, best_padwaste_in_curr_collection, best_bucket_in_curr_collection)
                self.idx_collection = []
                self.bucket_boundary_collection = []
                collect_ctr = 0
            self.idx_collection.append(idx)
            self.bucket_boundary_collection.append(list(bucket_boundary) + [sizes[-1]])
            collect_ctr += 1
            if idx % self.print_freq == self.print_freq-1 and self.verbose:
                curr_time = time.time()
                time_till_now = curr_time-t0
                projected_time_left = time_till_now * ((num / idx) - 1) if idx > 0 else -1
                print(f'{idx}/{num}: {(idx/num):.3f}. Time taken till now {time_till_now:.3f}. Projected time left {projected_time_left:.3f}. Best {result_best}')
        if len(self.idx_collection) > 0:
            best_padwaste_in_curr_collection, best_bucket_in_curr_collection, best_idx = self.process_collection()
            result_best = update_helper(idx - len(self.idx_collection) + best_idx, result_best, best_padwaste_in_curr_collection, best_bucket_in_curr_collection)
        return result_best

    def process_collection(self):
        # self.collect x self.num_buckets
        bucket_boundary_collection = np.array(self.bucket_boundary_collection)
        # self.num_shapes x self.collect x self.num_buckets
        buckets_tiled = np.tile(np.array(bucket_boundary_collection), (self.num_shapes, 1, 1))
        # self.num_shapes x self.collect
        key_col_tensor_tiled = np.tile(self.key_col_tensor, (self.num_buckets,bucket_boundary_collection.shape[0],1)).T
        bucket_idx = np.argmin(key_col_tensor_tiled > buckets_tiled, 2)
        bucket_for_each_shape = np.take_along_axis(bucket_boundary_collection, bucket_idx.T, 1)
        padding_waste_per_shape = bucket_for_each_shape - np.expand_dims(self.key_col_tensor, 0)
        #assert np.all(padding_waste_per_shape >= 0)
        total_padding_waste = np.sum((padding_waste_per_shape * self.val_col_tensor), 1)
        #assert len(total_padding_waste)
        best_idx = np.argmin(total_padding_waste)
        return total_padding_waste[best_idx], bucket_boundary_collection[best_idx], best_idx
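As a quick orientation to the class above, here is a minimal usage sketch. The shape histogram and bucket count are illustrative values, not taken from the diff, and it assumes the bucketing example directory is on the Python path.

# Hypothetical usage of BruteForceOptimalBucketing (illustrative values only).
from brute_force_min_pad_waste import BruteForceOptimalBucketing

# histogram: sequence length -> number of samples with that length
size_freq = {128: 40, 200: 25, 256: 20, 384: 10, 512: 5}
ob = BruteForceOptimalBucketing(size_freq, num_buckets=3)
best = ob.find_optimal_buckets()
# best is a dict with keys 'wasted_padding', 'idx' and 'buckets'
print(best['buckets'], best['wasted_padding'])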
docker/intel_code/llama13b/Model-References/PyTorch/examples/bucketing/bucket.py
ADDED
@@ -0,0 +1,169 @@
# Copyright (C) 2023 Habana Labs, Ltd. an Intel Company

import numpy as np
import time
import pickle as pkl

# A decorator for input/output validation of bucketing algorithms
def get_check_bucket(allow_none_return):
    # some bucketing algos like LP can return None
    def check_bucket(bucketer):
        def helper(shapes, num_buckets, *args, **kwargs):
            for k in shapes:
                assert type(k) == type(1)
                assert k >= 0
            assert num_buckets >= 1
            assert type(num_buckets) == type(1)
            buckets = bucketer(shapes, num_buckets, *args, **kwargs)
            if allow_none_return:
                if buckets is None:
                    return None
            assert len(buckets) <= num_buckets
            assert buckets[-1] <= max(shapes)
            return buckets
        return helper
    return check_bucket

# Percentile based bucketing
@get_check_bucket(False)
def percentile_bucket(shapes, num_buckets):
    buckets = np.unique(
        np.percentile(
            shapes,
            np.linspace(0, 100, num_buckets + 1),
            interpolation="lower",
        )[1:]
    )
    return buckets

# LP based bucketing
@get_check_bucket(True)
def lp_bucket(shapes, num_buckets):
    from pulp import LpMinimize, LpProblem, lpSum, PULP_CBC_CMD, LpStatus, LpVariable, LpInteger
    def padding_overhead(bucket_size,sample_size,num_samples):
        if (sample_size-bucket_size)>0:
            return 1e32
        else:
            return (bucket_size-sample_size)*num_samples
    data_unique=np.unique(shapes)
    prob = LpProblem('OptimalBucket',LpMinimize)
    Combinations=[]; padLoss={}; Indicators={}; DeltaM={}

    for s in data_unique:
        num_samples=(shapes==s).sum()
        for b in data_unique:
            Combinations.append('ind' + '_{}b_{}s'.format(b,s))
            padLoss['ind' + '_{}b_{}s'.format(b,s)] = padding_overhead(b,s,num_samples)
            Indicators['ind' + '_{}b_{}s'.format(b,s)] = LpVariable('ind' + '_{}b_{}s'.format(b,s),0,1,LpInteger)

    prob += lpSum([Indicators[ind]*padLoss[ind] for ind in padLoss.keys()]) # Objective (minimize padding)

    for s in data_unique:
        prob += lpSum([Indicators[key] for key in Combinations if '_{}s'.format(s) in key]) == 1
    bucket_indecators=[]
    for b in data_unique:
        Indicators['ind_bucket' + '_{}'.format(b)]=LpVariable('ind_bucket' + '_{}'.format(b),0,1,LpInteger)
        bucket_indecators.append(Indicators['ind_bucket' + '_{}'.format(b)])
    for b in data_unique:
        prob += lpSum([Indicators[key] for key in Combinations if '_{}b'.format(b) in key]) <= Indicators['ind_bucket' + '_{}'.format(b)]*len(data_unique)

    prob += lpSum(bucket_indecators)==num_buckets

    prob.solve(PULP_CBC_CMD(msg=0))
    LpStatus[prob.status]

    ip_buckets=[]
    for v in prob.variables():
        if 'ind_bucket' in v.name and v.value() > 0:
            ip_buckets.append(int(v.name.split('_')[-1]))

    if (prob.status==-1):
        print('Infeasable')
        return None
    else:
        return tuple(sorted(ip_buckets))

# Pad to max or constant bucketing
@get_check_bucket(False)
def const_bucket(shapes, num_buckets):
    return [max(shapes)]

# Uniform intervals bucketing
@get_check_bucket(False)
def uniform_bucket(shapes, num_buckets):
    mn = min(shapes)
    mx = max(shapes)
    step = (mx - mn) / num_buckets
    buckets = [mx]
    curr_bucket = mx
    step = (mx - mn) / num_buckets
    for i in range(num_buckets-1):
        curr_bucket = curr_bucket - step
        buckets = [curr_bucket] + buckets
    buckets = [round(k) for k in buckets]
    return buckets

# Brute force min pad waste bucketing
@get_check_bucket(False)
def brute_force_min_pad_waste(shapes, num_buckets, max_elems=10000000):
    from brute_force_min_pad_waste import BruteForceOptimalBucketing
    size_freq = {}
    for k in shapes:
        size_freq[k] = size_freq.get(k,0)+1
    ob = BruteForceOptimalBucketing(size_freq, num_buckets, numbucket_threshold=max_elems)
    res = ob.find_optimal_buckets()
    return res['buckets']

# Lloyd-Max quantization based bucketing
@get_check_bucket(False)
def lloyd_max_bucketing(shapes, num_buckets, max_steps=20):
    from lloyd_max_bucket import lloydmax
    from scipy.interpolate import CubicSpline
    hist = {}
    for k in shapes:
        hist[k] = hist.get(k,0) + 1
    x = []
    y = []
    for k in sorted(hist.keys()):
        x += [k]
        y += [hist[k]/sum(hist.values())]
    pdf = CubicSpline(x, y)
    repr = uniform_bucket(shapes, num_buckets)
    thresholds = list((np.array(repr[:-1]) + np.array(repr[1:]))/2)
    x,t,e = lloydmax(thresholds,repr,0.01, pdf, min(shapes), max(shapes), max_steps)
    buckets = [int(k) for k in t] + [max(shapes)]
    return buckets

def normalize_trial_buckets(trial_buckets):
    if trial_buckets is None:
        trial_buckets = 10
    if type(trial_buckets) == type(1):
        trial_buckets = range(1,trial_buckets)
    return trial_buckets

def eval_bucketing(buckets, shapes):
    tot = sum([min([i for i in buckets if i >= k]) for k in shapes])
    return tot, tot/len(shapes)


def bucket_analysis(shapes, bucket_algos, trial_buckets=None):
    trial_buckets = normalize_trial_buckets(trial_buckets)
    results = {}
    for algoidx, (bucket_algo_name, bucket_algo) in enumerate(bucket_algos):
        print(f'Processing {bucket_algo_name}')
        res = {}
        for num_bucket in trial_buckets:
            print(f'Processing bucket={num_bucket}')
            t0 = time.time()
            buckets = bucket_algo(shapes, num_bucket)
            t1 = time.time()
            if buckets is None:
                print(f'Failed to generate buckets for {bucket_algo_name} for {num_bucket} buckets. Falling back to const bucketing')
                buckets = const_bucket(shapes, num_bucket)
            totwaste, avgwaste = eval_bucketing(buckets, shapes)
            res[num_bucket] = {'totwaste':totwaste, 'avgwaste':avgwaste, 'time':t1-t0}
            print(algoidx, num_bucket, totwaste, avgwaste, t1-t0)
        assert bucket_algo_name not in results
        results[bucket_algo_name] = res
    pkl.dump(results, open('res.pkl', 'wb'), protocol=pkl.HIGHEST_PROTOCOL)
    return results
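A minimal sketch of how the individual strategies above can be compared directly, assuming this file is importable as `bucket`; the shape list below is made up for illustration.

# Compare two of the bucketing strategies on a small, made-up list of lengths.
from bucket import percentile_bucket, uniform_bucket, eval_bucketing

shapes = [3, 7, 7, 9, 12, 15, 15, 21, 30, 30, 31, 64]
for name, algo in [("uniform", uniform_bucket), ("percentile", percentile_bucket)]:
    buckets = algo(shapes, 4)
    # eval_bucketing sums, for every sample, the size of the smallest bucket that fits it
    tot, avg = eval_bucketing(buckets, shapes)
    print(name, list(buckets), tot, avg)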
docker/intel_code/llama13b/Model-References/PyTorch/examples/bucketing/bucket_analysis.svg
ADDED
docker/intel_code/llama13b/Model-References/PyTorch/examples/bucketing/bucket_analysis_bar_squad.svg
ADDED
docker/intel_code/llama13b/Model-References/PyTorch/examples/bucketing/datasets_library.py
ADDED
@@ -0,0 +1,123 @@
# Copyright (C) 2023 Habana Labs, Ltd. an Intel Company

import random, itertools
from tqdm import tqdm
import numpy as np

def get_cdf(pdf):
    # list of tuples, each tuple is a k-v pair (k is the number, v its probability)
    sorted_key_pdf = sorted(pdf, key=lambda x:x[0]) # sort by keys
    run_sum = 0
    cdf = []
    for i in range(len(sorted_key_pdf)):
        k, v = sorted_key_pdf[i]
        run_sum += v
        assert run_sum <= 1 or (1-run_sum) < 0.000000001
        cdf += [(k, 1 if run_sum > 1 else run_sum)]
    last_elem = cdf[-1]
    assert last_elem[1] <= 1.0
    assert (1.0 - last_elem[1]) < 0.000001
    cdf[-1] = (cdf[-1][0], 1.0) # set the last elem to 1
    return cdf

def hist_to_tuplelist(pdf):
    inp_is_hist = type(pdf) == type({1:2})
    if inp_is_hist:
        # pdf is a histogram. values add to 1 and are positive
        pdf = [(k,pdf[k]) for k in pdf]
    return pdf, inp_is_hist

def format_convert(f):
    def helper(pdf, bs, aggregator):
        pdf, inp_is_hist = hist_to_tuplelist(pdf)
        out = f(pdf, bs, aggregator)
        if inp_is_hist:
            return {k:v for k,v in out}
        else:
            return out
    return helper

@format_convert
def aggregator_batch(pdf, bs, aggregator):
    assert aggregator in ['min', 'max']
    pdf = sorted(pdf, key=lambda x:x[0])
    cdf = get_cdf(pdf)
    assert len(cdf) == len(pdf)
    result = []
    for p, c in zip(pdf, cdf):
        kp, pval = p
        kc, cval = c
        assert kp == kc
        val = bs * pval * ((cval if aggregator == 'max' else (1-cval)) ** (bs-1))
        result += [(kp, val)]
    # the resulting pdf might be unnormalized, probably due to computational issues? normalizing it
    result_val_tot = sum([k[1] for k in result])
    result = [(k[0], k[1]/result_val_tot) for k in result]
    return result


def generate_random_gaussian():
    import numpy as np
    while True:
        x = np.random.normal(500, 50)
        if x < 2: # truncating it so that its not negative
            x = 2
        x = round(x) # its a discrete distribution, so rounding it off
        yield x

def gaussian(num_samples):
    return list(itertools.islice(generate_random_gaussian(), num_samples))

def batched_gaussian(orig_list, bs, aggregator):
    return [aggregator(orig_list[i * bs : (i+1) * bs]) for i in range(len(orig_list) // bs)]


def batch_by_formula(orig_list, bs, aggregator):
    count_hist = {}
    for item in orig_list:
        count_hist[item] = count_hist.get(item, 0) + 1
    total = sum(list(count_hist.values()))
    pdf_hist = {k:count_hist[k]/total for k in count_hist}
    return aggregator_batch(pdf_hist, bs, aggregator)

def sample_from_pdf(pdf, num_samples):
    pdf, _ = hist_to_tuplelist(pdf)
    nums = [k[0] for k in pdf]
    prob = [k[1] for k in pdf]
    return np.random.choice(nums, num_samples, p=prob)


def squad(bs=1, clip=None):
    print('Start squad bs =',bs)
    from datasets import load_dataset
    from torch.utils.data import DataLoader
    from transformers import AutoTokenizer
    import torch

    # Pad to max length sentence in each batch
    def collate(batch):
        def pad(item, val, maxlen):
            return torch.tensor([i + [val]*(maxlen-len(i)) for i in item])
        token = [k['token_type_ids'] for k in batch]
        attention = [k['attention_mask'] for k in batch]
        inp = [k['input_ids'] for k in batch]
        token_lens = [len(i) for i in token]
        # Find the max length sentence in this batch
        max_len = max(token_lens)
        assert token_lens == [len(i) for i in attention] == [len(i) for i in inp]
        return {'token_type_ids': pad(token, 0, max_len), 'attention_mask': pad(attention, 0, max_len), 'input_ids': pad(inp, 0, max_len)}


    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')

    squad_dataset = load_dataset('squad')
    tokenized_dataset = squad_dataset.map(lambda x: tokenizer(x['context']), batched=True)

    dt = DataLoader(tokenized_dataset['train'], batch_size=bs, num_workers=2, collate_fn=collate)
    lens = []
    for idx, data in tqdm(enumerate(dt)):
        lens += [data['input_ids'].shape[1]]
        if clip is not None and len(lens) >= clip:
            break
    print('Done squad bs =', bs)
    return lens
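An illustrative check (not part of the diff) of how the pieces above fit together: `batched_gaussian` computes the empirical per-batch maximum length, while `batch_by_formula` derives the same distribution analytically from the order-statistic formula, so the two should roughly agree. The sample sizes below are arbitrary.

import numpy as np
from datasets_library import gaussian, batched_gaussian, batch_by_formula, sample_from_pdf

np.random.seed(0)
lens = gaussian(20000)                      # raw per-sample lengths
emp = batched_gaussian(lens, 8, max)        # empirical max length per batch of 8
ana = sample_from_pdf(batch_by_formula(lens, 8, 'max'), len(emp))  # analytic counterpart
print(np.mean(emp), np.mean(ana))           # the two means should be close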
docker/intel_code/llama13b/Model-References/PyTorch/examples/bucketing/requirements.txt
ADDED
@@ -0,0 +1,7 @@
matplotlib
tqdm
datasets
transformers
pulp
scipy
pytest
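Setting up an environment for these examples is the standard pip workflow; the virtual-environment step is just a suggestion.

python -m venv venv && source venv/bin/activate   # optional
pip install -r requirements.txt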
docker/intel_code/llama13b/Model-References/PyTorch/examples/bucketing/run_demo_bucketing_gaussian.py
ADDED
@@ -0,0 +1,14 @@
# Copyright (C) 2023 Habana Labs, Ltd. an Intel Company

import itertools
from plotting import plot_bucket_analysis_results
from bucket import bucket_analysis, lp_bucket, const_bucket, uniform_bucket, percentile_bucket, lloyd_max_bucketing, brute_force_min_pad_waste
from datasets_library import generate_random_gaussian

shapes = list(itertools.islice(generate_random_gaussian(), 1000))
results = bucket_analysis(shapes, [("lp_bucket", lp_bucket), ("const_bucket", const_bucket), ("uniform_bucket", uniform_bucket), \
                                   ("percentile_bucket", percentile_bucket), ("lloyd_max_bucketing", lloyd_max_bucketing), \
                                   ("brute_force_min_pad_waste", brute_force_min_pad_waste)], [2,3,4,5,6,10,20])
plot_bucket_analysis_results(results, 'bucket_analysis_bar_gaussian.svg')
docker/intel_code/llama13b/Model-References/PyTorch/examples/bucketing/run_demo_squad.py
ADDED
@@ -0,0 +1,9 @@
# Copyright (C) 2023 Habana Labs, Ltd. an Intel Company

from datasets_library import squad
from plotting import plotter


if __name__ == '__main__':
    print("Plotting squad, takes 2-3 mins to run")
    plotter([squad(1), squad(4), squad(16), squad(64), squad(256), squad(512)], 'squad.svg', ['bs='+str(bs) for bs in [1,4,16,64,256,512]])
docker/intel_code/llama13b/Model-References/PyTorch/examples/bucketing/squad.svg
ADDED
docker/intel_code/llama13b/Model-References/PyTorch/examples/bucketing/test.py
ADDED
@@ -0,0 +1,88 @@
# Copyright (C) 2023 Habana Labs, Ltd. an Intel Company

from datasets_library import gaussian, batched_gaussian, batch_by_formula, sample_from_pdf, generate_random_gaussian
import numpy as np
np.random.seed(0)
from bucket import bucket_analysis, lp_bucket, const_bucket, uniform_bucket, percentile_bucket, lloyd_max_bucketing, brute_force_min_pad_waste
import itertools

def test_squad_batching():
    print("Plotting gaussian")
    num_samples = 100000
    bs = 4
    gs = gaussian(num_samples)
    print(sum(gs))
    assert len(gs) == 100000
    assert np.abs(np.mean(gs) - 500) <= 2
    assert np.abs(np.var(gs) - 2486.8457) <= 20
    orig = batched_gaussian(gs, 1, max)
    assert len(orig) == 100000
    assert set(orig) == set(gs)
    max_batch4 = batched_gaussian(gs, bs, max)
    assert len(max_batch4) == 25000
    assert np.mean(max_batch4) > np.mean(gs)
    assert np.var(max_batch4) < np.var(gs)
    min_batch4 = batched_gaussian(gs, bs, min)
    assert len(min_batch4) == 25000
    assert np.mean(min_batch4) < np.mean(gs)
    assert np.var(min_batch4) < np.var(gs)
    max_formula_batch4 = sample_from_pdf(batch_by_formula(gs, bs, 'max'), num_samples)
    assert len(max_formula_batch4) == 100000
    assert np.abs(np.mean(max_formula_batch4) - np.mean(max_batch4)) < 5
    assert np.abs(np.var(max_formula_batch4) - np.var(max_batch4)) < 40
    min_formula_batch4 = sample_from_pdf(batch_by_formula(gs, bs, 'min'), num_samples)
    assert len(min_formula_batch4) == 100000
    assert np.abs(np.mean(min_formula_batch4) - np.mean(min_batch4)) < 5
    assert np.abs(np.var(min_formula_batch4) - np.var(min_batch4)) < 40


def test_bucketing():
    shapes = list(itertools.islice(generate_random_gaussian(), 1000))
    assert sum(shapes) == 498957
    assert len(set(shapes)) == 229
    #("lp_bucket", lp_bucket) # this takes quite long, so skipping its test
    results = bucket_analysis(shapes, [("const_bucket", const_bucket), ("uniform_bucket", uniform_bucket), \
                                       ("percentile_bucket", percentile_bucket), ("lloyd_max_bucketing", lloyd_max_bucketing), \
                                       ("brute_force_min_pad_waste", brute_force_min_pad_waste)], [2,20])
    expected = {'const_bucket': {2: {'totwaste': 662000, 'avgwaste': 662.0}, 20: {'totwaste': 662000, 'avgwaste': 662.0}}, \
                'uniform_bucket': {2: {'totwaste': 579218, 'avgwaste': 579.218}, 20: {'totwaste': 506507, 'avgwaste': 506.507}}, \
                'percentile_bucket': {2: {'totwaste': 579522, 'avgwaste': 579.522}, 20: {'totwaste': 506122, 'avgwaste': 506.122}}, \
                'lloyd_max_bucketing': {2: {'totwaste': 594004, 'avgwaste': 594.004}, 20: {'totwaste': 506218, 'avgwaste': 506.218}}, \
                'brute_force_min_pad_waste': {2: {'totwaste': 562739, 'avgwaste': 562.739,}, 20: {'totwaste': 504726, 'avgwaste': 504.726}}}
    for algo_name in ["const_bucket", "uniform_bucket", "percentile_bucket", "lloyd_max_bucketing", "brute_force_min_pad_waste"]:
        assert algo_name in results
        val = results[algo_name]
        for bkt in [2,20]:
            assert bkt in val
            bkt_result = val[bkt]
            bkt_result.pop('time')
            assert bkt_result == expected[algo_name][bkt]


def test_numsteps():

    shapes = list(itertools.islice(generate_random_gaussian(), 10000))
    lloyd_max_set_step = lambda step : (lambda shp, num_buckets : lloyd_max_bucketing(shp, num_buckets, step))

    results = bucket_analysis(shapes, [("lloyd_max_02", lloyd_max_set_step(2)), ("lloyd_max_10", lloyd_max_set_step(10)), ("lloyd_max_20", lloyd_max_set_step(20))], [6, 10])
    expected = {'lloyd_max_02': {6: {'totwaste': 5284440, 'avgwaste': 528.444}, \
                                 10: {'totwaste': 5172300, 'avgwaste': 517.23,}}, \
                'lloyd_max_10': {6: {'totwaste': 5226954, 'avgwaste': 522.6954}, \
                                 10: {'totwaste': 5149487, 'avgwaste': 514.9487}}, \
                'lloyd_max_20': {6: {'totwaste': 5209336, 'avgwaste': 520.9336}, \
                                 10: {'totwaste': 5137341, 'avgwaste': 513.7341}}, \
                'lloyd_max_30': {6: {'totwaste': 5203774, 'avgwaste': 520.3774}, \
                                 10: {'totwaste': 5131550, 'avgwaste': 513.155}}}
    expected = {'lloyd_max_02': {6: {'totwaste': 5284440, 'avgwaste': 528.444}, 10: {'totwaste': 5172300, 'avgwaste': 517.23}}, \
                'lloyd_max_10': {6: {'totwaste': 5226191, 'avgwaste': 522.6191}, 10: {'totwaste': 5147715, 'avgwaste': 514.7715}}, \
                'lloyd_max_20': {6: {'totwaste': 5209807, 'avgwaste': 520.9807}, 10: {'totwaste': 5135907, 'avgwaste': 513.5907}}}
    for algo_name in ["lloyd_max_02", "lloyd_max_10", "lloyd_max_20"]:
        assert algo_name in results
        val = results[algo_name]
        for bkt in [6,10]:
            assert bkt in val
            bkt_result = val[bkt]
            bkt_result.pop('time')
            #print(algo_name, bkt, bkt_result, expected[algo_name][bkt])
            assert bkt_result == expected[algo_name][bkt]
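These checks look like standard pytest tests (pytest is pinned in requirements.txt), so a typical invocation from the bucketing example directory would be the line below, assuming companion modules not shown in this truncated view (for example lloyd_max_bucket.py and plotting.py) are present.

python -m pytest test.py -v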
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/dev_mn.yaml
ADDED
@@ -0,0 +1,128 @@
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 32
    channels: 4
    cond_stage_trainable: true
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32
        in_channels: 4
        out_channels: 4
        model_channels: 32 # 320 # TODO increase
        attention_resolutions: [ ] # is equal to fixed spatial resolution: 32 , 16 , 8
        num_res_blocks: 2
        channel_mult: [ 1, ]
        #num_head_channels: 32
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 32
        use_checkpoint: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ckpt_path: "models/first_stage_models/kl-f8/model.ckpt"
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.BERTEmbedder
      params:
        n_embed: 32
        n_layer: 1 #32 # TODO: increase


data:
  target: ldm.data.laion.WebDataModuleFromConfig
  params:
    tar_base: "pipe:aws s3 cp s3://s-datasets/laion5b/laion2B-data/"
    batch_size: 4
    num_workers: 4
    n_nodes: 4
    train:
      shards: '{000000..231339}.tar -'
      shuffle: 10000
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 256
          interpolation: 3
      - target: torchvision.transforms.RandomCrop
        params:
          size: 256

    # NOTE use enough shards to avoid empty validation loops in workers
    validation:
      shards: '{231346..231349}.tar -'
      shuffle: 0
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 256
          interpolation: 3
      - target: torchvision.transforms.CenterCrop
        params:
          size: 256


lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 500 # 5000
        max_images: 8
        increase_log_steps: False
        log_first_step: False


  trainer:
    #replace_sampler_ddp: False
    benchmark: True
    val_check_interval: 1000 # every 20k training steps
    num_sanity_val_steps: 0
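As a rough illustration of how configs like the one above are consumed, the upstream stable-diffusion code reads them with OmegaConf; the relative path and the fields printed below are assumptions for the sketch, not part of the diff.

# Sketch only: load the YAML and read a couple of fields with OmegaConf.
from omegaconf import OmegaConf

cfg = OmegaConf.load("configs/stable-diffusion/dev_mn.yaml")  # path assumed relative to the stable-diffusion directory
print(cfg.model.target)                                        # ldm.models.diffusion.ddpm.LatentDiffusion
print(cfg.model.params.unet_config.params.model_channels)      # 32 in this dev config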
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/dev_mn_dummy.yaml
ADDED
@@ -0,0 +1,109 @@
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 32
    channels: 4
    cond_stage_trainable: true
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32
        in_channels: 4
        out_channels: 4
        model_channels: 32 # 320 # TODO increase
        attention_resolutions: [ ] # is equal to fixed spatial resolution: 32 , 16 , 8
        num_res_blocks: 2
        channel_mult: [ 1, ]
        #num_head_channels: 32
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 32
        use_checkpoint: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ckpt_path: "models/first_stage_models/kl-f8/model.ckpt"
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.BERTEmbedder
      params:
        n_embed: 32
        n_layer: 1 #32 # TODO: increase


data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 4
    num_workers: 4
    wrap: false
    train:
      target: ldm.data.dummy.DummyData
      params:
        length: 20000
        size: [256, 256, 3]
    validation:
      target: ldm.data.dummy.DummyData
      params:
        length: 10000
        size: [256, 256, 3]


lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 500 # 5000
        max_images: 8
        increase_log_steps: False
        log_first_step: False


  trainer:
    #replace_sampler_ddp: False
    benchmark: True
    val_check_interval: 1000 # every 20k training steps
    num_sanity_val_steps: 0
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/inpainting/v1-edgeinpainting.yaml
ADDED
@@ -0,0 +1,157 @@
model:
  base_learning_rate: 7.5e-05
  target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false # Note: different from the one we trained before
    conditioning_key: hybrid # important
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    ckpt_path: "/fsx/stable-diffusion/stable-diffusion/checkpoints/v1pp/v1pp-flatlined-hr.ckpt"

    concat_keys:
      - mask
      - masked_image
      - smoothing_strength

    c_concat_log_start: 1
    c_concat_log_end: 5

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 2500 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 10 # 4 data + 4 downscaled image + 1 mask + 1 smoothing strength
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder


data:
  target: ldm.data.laion.WebDataModuleFromConfig
  params:
    tar_base: "__improvedaesthetic__"
    batch_size: 2
    num_workers: 4
    multinode: True
    min_size: 512
    max_pwatermark: 0.8
    train:
      shards: '{00000..17279}.tar -'
      shuffle: 10000
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 512
          interpolation: 3
      - target: torchvision.transforms.RandomCrop
        params:
          size: 512
      postprocess:
        target: ldm.data.laion.AddEdge
        params:
          mode: "512train-large"

    # NOTE use enough shards to avoid empty validation loops in workers
    validation:
      shards: '{17280..17535}.tar -'
      shuffle: 0
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 512
          interpolation: 3
      - target: torchvision.transforms.CenterCrop
        params:
          size: 512
      postprocess:
        target: ldm.data.laion.AddEdge
        params:
          mode: "512train-large"


lightning:
  find_unused_parameters: False

  modelcheckpoint:
    params:
      every_n_train_steps: 2000

  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        disabled: False
        batch_frequency: 1000
        max_images: 4
        increase_log_steps: False
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          inpaint: False
          plot_progressive_rows: False
          plot_diffusion_rows: False
          N: 4
          unconditional_guidance_scale: 3.0
          unconditional_guidance_label: [""]
          ddim_steps: 100 # todo check these out for inpainting,
          ddim_eta: 1.0 # todo check these out for inpainting,

  trainer:
    benchmark: True
    val_check_interval: 5000000 # really sorry
    num_sanity_val_steps: 0
    accumulate_grad_batches: 2
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/inpainting/v1-finetune-for-inpainting-laion-aesthetic-larger-masks-and-ucfg.yaml
ADDED
@@ -0,0 +1,156 @@
model:
  base_learning_rate: 7.5e-05
  target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false # Note: different from the one we trained before
    conditioning_key: hybrid # important
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    ckpt_path: "/fsx/stable-diffusion/stable-diffusion/checkpoints/v1pp/v1pphrflatlined2-pruned.ckpt"

    ucg_training:
      txt:
        p: 0.1
        val: ""

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 9 # 4 data + 4 downscaled image + 1 mask
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder


data:
  target: ldm.data.laion.WebDataModuleFromConfig
  params:
    tar_base: "__improvedaesthetic__"
    batch_size: 2
    num_workers: 4
    multinode: True
    min_size: 512
    max_pwatermark: 0.8
    train:
      shards: '{00000..17279}.tar -'
      shuffle: 10000
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 512
          interpolation: 3
      - target: torchvision.transforms.RandomCrop
        params:
          size: 512
      postprocess:
        target: ldm.data.laion.AddMask
        params:
          mode: "512train-large"
          p_drop: 0.25

    # NOTE use enough shards to avoid empty validation loops in workers
    validation:
      shards: '{17280..17535}.tar -'
      shuffle: 0
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 512
          interpolation: 3
      - target: torchvision.transforms.CenterCrop
        params:
          size: 512
      postprocess:
        target: ldm.data.laion.AddMask
        params:
          mode: "512train-large"
          p_drop: 0.25


lightning:
  find_unused_parameters: False

  modelcheckpoint:
    params:
      every_n_train_steps: 2000

  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        disabled: False
        batch_frequency: 1000
        max_images: 4
        increase_log_steps: False
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          inpaint: False
          plot_progressive_rows: False
          plot_diffusion_rows: False
          N: 4
          unconditional_guidance_scale: 3.0
          unconditional_guidance_label: [""]
          ddim_steps: 100 # todo check these out for inpainting,
          ddim_eta: 1.0 # todo check these out for inpainting,

  trainer:
    benchmark: True
    val_check_interval: 5000000 # really sorry
    num_sanity_val_steps: 0
    accumulate_grad_batches: 2
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/inpainting/v1-finetune-for-inpainting-laion-aesthetic-larger-masks.yaml
ADDED
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
model:
  base_learning_rate: 7.5e-05
  target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false   # Note: different from the one we trained before
    conditioning_key: hybrid   # important
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    ckpt_path: "/fsx/stable-diffusion/stable-diffusion/checkpoints/v1pp/v1pp-flatlined-hr.ckpt"

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 9  # 4 data + 4 downscaled image + 1 mask
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder


data:
  target: ldm.data.laion.WebDataModuleFromConfig
  params:
    tar_base: "__improvedaesthetic__"
    batch_size: 2
    num_workers: 4
    multinode: True
    min_size: 512
    max_pwatermark: 0.8
    train:
      shards: '{00000..17279}.tar -'
      shuffle: 10000
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 512
          interpolation: 3
      - target: torchvision.transforms.RandomCrop
        params:
          size: 512
      postprocess:
        target: ldm.data.laion.AddMask
        params:
          mode: "512train-large"

    # NOTE use enough shards to avoid empty validation loops in workers
    validation:
      shards: '{17280..17535}.tar -'
      shuffle: 0
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 512
          interpolation: 3
      - target: torchvision.transforms.CenterCrop
        params:
          size: 512
      postprocess:
        target: ldm.data.laion.AddMask
        params:
          mode: "512train-large"


lightning:
  find_unused_parameters: False

  modelcheckpoint:
    params:
      every_n_train_steps: 2000

  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        disabled: False
        batch_frequency: 1000
        max_images: 4
        increase_log_steps: False
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          inpaint: False
          plot_progressive_rows: False
          plot_diffusion_rows: False
          N: 4
          unconditional_guidance_scale: 3.0
          unconditional_guidance_label: [""]
          ddim_steps: 100  # todo check these out for inpainting,
          ddim_eta: 1.0  # todo check these out for inpainting,

  trainer:
    benchmark: True
    val_check_interval: 5000000 # really sorry
    num_sanity_val_steps: 0
    accumulate_grad_batches: 2
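With f_max and f_min both set to 1.0 and an effectively infinite cycle length, the scheduler parameters above reduce to a linear warm-up from f_start to 1.0 over warm_up_steps, after which the multiplier stays flat. A small sketch of that behaviour (mimicking the settings, not reimplementing ldm.lr_scheduler itself) could look like this:

# Sketch of the LR multiplier implied by warm_up_steps=[2500], f_start=[1e-6],
# f_max=f_min=[1.0]: linear warm-up to 1.0, then constant.
import torch

def lr_lambda(step, warm_up_steps=2500, f_start=1e-6, f_max=1.0):
    if step < warm_up_steps:
        return f_start + (f_max - f_start) * step / warm_up_steps
    return f_max  # f_min == f_max == 1.0 in this config, so it stays flat

model = torch.nn.Linear(4, 4)
opt = torch.optim.AdamW(model.parameters(), lr=7.5e-05)
sched = torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda)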
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/inpainting/v1-finetune-for-inpainting-laion-iaesthe.yaml
ADDED
@@ -0,0 +1,144 @@
model:
  base_learning_rate: 7.5e-05
  target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false   # Note: different from the one we trained before
    conditioning_key: hybrid   # important
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    ckpt_path: "/fsx/stable-diffusion/stable-diffusion/checkpoints2/v1pp/v1pp-flatline-pruned.ckpt"

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 9  # 4 data + 4 downscaled image + 1 mask
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder


data:
  target: ldm.data.laion.WebDataModuleFromConfig
  params:
    tar_base: "pipe:aws s3 cp s3://s-datasets/laion-high-resolution/"
    batch_size: 4
    num_workers: 4
    multinode: True
    min_size: 512
    train:
      shards: '{00000..17279}.tar -'
      shuffle: 10000
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 512
          interpolation: 3
      - target: torchvision.transforms.RandomCrop
        params:
          size: 512
      postprocess:
        target: ldm.data.laion.AddMask

    # NOTE use enough shards to avoid empty validation loops in workers
    validation:
      shards: '{17280..17535}.tar -'
      shuffle: 0
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 512
          interpolation: 3
      - target: torchvision.transforms.CenterCrop
        params:
          size: 512
      postprocess:
        target: ldm.data.laion.AddMask


lightning:
  find_unused_parameters: False

  modelcheckpoint:
    params:
      every_n_train_steps: 2000

  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        disabled: False
        batch_frequency: 1000
        max_images: 4
        increase_log_steps: False
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          inpaint: False
          plot_progressive_rows: False
          plot_diffusion_rows: False
          N: 4
          unconditional_guidance_scale: 3.0
          unconditional_guidance_label: [""]
          ddim_steps: 100  # todo check these out for inpainting,
          ddim_eta: 1.0  # todo check these out for inpainting,

  trainer:
    benchmark: True
    val_check_interval: 5000000 # really sorry
    num_sanity_val_steps: 0
    accumulate_grad_batches: 2
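The inpainting configs set in_channels: 9 on the UNet because, as the inline comment says, the 4-channel noisy latent is concatenated with a 4-channel latent of the masked image and a 1-channel mask before entering the network. A shapes-only illustration (placeholder tensors, not the actual pipeline):

# Why in_channels=9 for the inpainting UNet: 4 (noisy latent) + 4 (encoded
# masked image) + 1 (mask), concatenated along the channel dimension.
import torch

b, h, w = 2, 64, 64
noisy_latent  = torch.randn(b, 4, h, w)
masked_latent = torch.randn(b, 4, h, w)   # encoded masked image (placeholder)
mask          = torch.randn(b, 1, h, w)   # downsampled inpainting mask (placeholder)
unet_input = torch.cat([noisy_latent, masked_latent, mask], dim=1)
assert unet_input.shape[1] == 9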
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-1p4B-multinode-clip-encoder-high-res-512.yaml
ADDED
@@ -0,0 +1,135 @@
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false   # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 1 ] # NOTE for resuming. use 10000 if starting from scratch
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder


data:
  target: ldm.data.laion.WebDataModuleFromConfig
  params:
    tar_base: "pipe:aws s3 cp s3://s-datasets/laion-high-resolution/"
    batch_size: 4
    num_workers: 4
    multinode: True
    train:
      shards: '{00000..17279}.tar -'
      shuffle: 10000
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 512
          interpolation: 3
      - target: torchvision.transforms.RandomCrop
        params:
          size: 512

    # NOTE use enough shards to avoid empty validation loops in workers
    validation:
      shards: '{17280..17535}.tar -'
      shuffle: 0
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 512
          interpolation: 3
      - target: torchvision.transforms.CenterCrop
        params:
          size: 512


lightning:
  find_unused_parameters: False

  modelcheckpoint:
    params:
      every_n_train_steps: 5000

  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 4
        increase_log_steps: False
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          inpaint: False
          plot_progressive_rows: False
          plot_diffusion_rows: False
          N: 4
          unconditional_guidance_scale: 3.0
          unconditional_guidance_label: [""]

  trainer:
    benchmark: True
    val_check_interval: 5000000 # really sorry
    num_sanity_val_steps: 0
    accumulate_grad_batches: 2
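A quick consistency check between the data pipeline and the model in this high-res config: the kl-f8 autoencoder (ch_mult of length 4, i.e. three downsampling stages, matching the "num_down = len(ch_mult)-1" comment used in the f16 configs below) shrinks images by a factor of 8, so the 512-pixel crops above produce 64x64 latents, which is exactly the model's image_size: 64.

# Crop size vs. latent size for the kl-f8 first stage (assumption: factor is
# 2 ** (len(ch_mult) - 1), per the num_down comment in the f16 configs).
ch_mult = [1, 2, 4, 4]
downsample_factor = 2 ** (len(ch_mult) - 1)   # 8
crop_size = 512
latent_size = crop_size // downsample_factor
assert (downsample_factor, latent_size) == (8, 64)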
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-1p4B-multinode-clip-encoder.yaml
ADDED
@@ -0,0 +1,131 @@
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 32
    channels: 4
    cond_stage_trainable: false   # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ckpt_path: "models/first_stage_models/kl-f8/model.ckpt"
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder


data:
  target: ldm.data.laion.WebDataModuleFromConfig
  params:
    tar_base: "pipe:aws s3 cp s3://s-datasets/laion5b/laion2B-data/"
    batch_size: 50
    num_workers: 4
    multinode: True
    train:
      shards: '{000000..231317}.tar -'
      shuffle: 10000
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 256
          interpolation: 3
      - target: torchvision.transforms.RandomCrop
        params:
          size: 256

    # NOTE use enough shards to avoid empty validation loops in workers
    validation:
      shards: '{231318..231349}.tar -'
      shuffle: 0
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 256
          interpolation: 3
      - target: torchvision.transforms.CenterCrop
        params:
          size: 256


lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 4
        increase_log_steps: False
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          inpaint: False
          plot_progressive_rows: False
          plot_diffusion_rows: False
          N: 4
          unconditional_guidance_scale: 3.0
          unconditional_guidance_label: [""]

  trainer:
    #replace_sampler_ddp: False
    benchmark: True
    val_check_interval: 5000000 # really sorry
    num_sanity_val_steps: 0
    accumulate_grad_batches: 2
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-1p4B-multinode.yaml
ADDED
@@ -0,0 +1,128 @@
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 32
    channels: 4
    cond_stage_trainable: true
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 1280
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ckpt_path: "models/first_stage_models/kl-f8/model.ckpt"
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.BERTEmbedder
      params:
        n_embed: 1280
        n_layer: 32


data:
  target: ldm.data.laion.WebDataModuleFromConfig
  params:
    tar_base: "pipe:aws s3 cp s3://s-datasets/laion5b/laion2B-data/"
    batch_size: 12
    num_workers: 4
    train:
      shards: '{000000..231317}.tar -'
      shuffle: 10000
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 256
          interpolation: 3
      - target: torchvision.transforms.RandomCrop
        params:
          size: 256

    # NOTE use enough shards to avoid empty validation loops in workers
    validation:
      shards: '{231318..231349}.tar -'
      shuffle: 0
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 256
          interpolation: 3
      - target: torchvision.transforms.CenterCrop
        params:
          size: 256


lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 8
        increase_log_steps: False
        log_first_step: False


  trainer:
    #replace_sampler_ddp: False
    benchmark: True
    val_check_interval: 50000
    num_sanity_val_steps: 0
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-clip-encoder-dev.yaml
ADDED
@@ -0,0 +1,127 @@
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 32
    channels: 4
    cond_stage_trainable: false   # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ckpt_path: "models/first_stage_models/kl-f8/model.ckpt"
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder


data:
  target: ldm.data.laion.WebDataModuleFromConfig
  params:
    tar_base: "pipe:aws s3 cp s3://s-datasets/laion5b/laion2B-data/"
    batch_size: 56
    num_workers: 4
    multinode: True
    train:
      shards: '{000000..231317}.tar -'
      shuffle: 10000
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 256
          interpolation: 3
      - target: torchvision.transforms.RandomCrop
        params:
          size: 256

    # NOTE use enough shards to avoid empty validation loops in workers
    validation:
      shards: '{231318..231349}.tar -'
      shuffle: 0
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 256
          interpolation: 3
      - target: torchvision.transforms.CenterCrop
        params:
          size: 256


lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 8
        increase_log_steps: False
        log_first_step: False


  trainer:
    #replace_sampler_ddp: False
    benchmark: True
    val_check_interval: 50000
    num_sanity_val_steps: 0
    accumulate_grad_batches: 2
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-ldm-unfrozen-dev.yaml
ADDED
@@ -0,0 +1,129 @@
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 32
    channels: 4
    cond_stage_trainable: true
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 1280
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ckpt_path: "models/first_stage_models/kl-f8/model.ckpt"
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.BERTEmbedder
      params:
        n_embed: 1280
        n_layer: 32


data:
  target: ldm.data.laion.WebDataModuleFromConfig
  params:
    tar_base: "pipe:aws s3 cp s3://s-datasets/laion5b/laion2B-data/"
    batch_size: 12
    num_workers: 4
    multinode: False
    train:
      shards: '{000000..231317}.tar -'
      shuffle: 10000
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 256
          interpolation: 3
      - target: torchvision.transforms.RandomCrop
        params:
          size: 256

    # NOTE use enough shards to avoid empty validation loops in workers
    validation:
      shards: '{231318..231349}.tar -'
      shuffle: 0
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 256
          interpolation: 3
      - target: torchvision.transforms.CenterCrop
        params:
          size: 256


lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 8
        increase_log_steps: False
        log_first_step: False


  trainer:
    #replace_sampler_ddp: False
    benchmark: True
    val_check_interval: 50000
    num_sanity_val_steps: 0
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-ldm-vae-f8.yaml
ADDED
@@ -0,0 +1,130 @@
model:
  base_learning_rate: 1.0e-04 # TODO: run with scale_lr False
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 32
    channels: 4
    cond_stage_trainable: true
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32
        in_channels: 4
        out_channels: 4
        model_channels: 128 # 320 # TODO increase
        attention_resolutions: [ 4, 2, 1 ]   # is equal to fixed spatial resolution: 32 , 16 , 8
        num_res_blocks: 2
        channel_mult: [ 1,2,4,4 ]
        #num_head_channels: 32
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 1280
        use_checkpoint: True

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ckpt_path: "/home/robin/projects/latent-diffusion/models/first_stage_models/kl-f8/model.ckpt"
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.BERTEmbedder
      params:
        n_embed: 1280
        n_layer: 3 #32 # TODO: increase


data:
  target: ldm.data.laion.WebDataModuleFromConfig
  params:
    tar_base: "pipe:aws s3 cp s3://s-datasets/laion5b/laion2B-data/"
    batch_size: 60
    num_workers: 4
    n_nodes: 2 # TODO: runs with two gpus
    train:
      shards: '{000000..000010}.tar -' # TODO: wild guess, change
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 512
          interpolation: 3
      - target: torchvision.transforms.RandomCrop
        params:
          size: 512

      shuffle: 5000
      n_examples: 16519100 # TODO: find out
    validation:
      shards: '{000011..000012}.tar -' # TODO: wild guess, change
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 512
          interpolation: 3
      - target: torchvision.transforms.CenterCrop
        params:
          size: 512

      shuffle: 0
      n_examples: 60000 # TODO: find out
      val_num_workers: 2



lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000 # 5000
        max_images: 8
        increase_log_steps: False
        log_first_step: True


  trainer:
    replace_sampler_ddp: False
    benchmark: True
    val_check_interval: 20000 # every 20k training steps
    num_sanity_val_steps: 0
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-multinode-clip-encoder-f16-1024-laion-hr.yaml
ADDED
@@ -0,0 +1,133 @@
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.001
    linear_end: 0.015
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 16
    cond_stage_trainable: false   # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.22765929 # magic number

    #ckpt_path: "/home/mchorse/stable-diffusion-ckpts/768f16-2022-06-23-pruned.ckpt"

    #scheduler_config: # 10000 warmup steps
    #  target: ldm.lr_scheduler.LambdaLinearScheduler
    #  params:
    #    warm_up_steps: [ 10000 ]
    #    cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
    #    f_start: [ 1.e-6 ]
    #    f_max: [ 1. ]
    #    f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64 # not really needed
        in_channels: 16
        out_channels: 16
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 16
        monitor: val/rec_loss
        ddconfig:
          double_z: True
          z_channels: 16
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [ 1,1,2,2,4 ]  # num_down = len(ch_mult)-1
          num_res_blocks: 2
          attn_resolutions: [ 16 ]
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder


data:
  target: ldm.data.laion.WebDataModuleFromConfig
  params:
    tar_base: "pipe:aws s3 cp s3://s-datasets/laion-high-resolution/"
    batch_size: 3
    num_workers: 4
    multinode: True
    train:
      shards: '{00000..17279}.tar -'
      shuffle: 10000
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 1024
          interpolation: 3
      - target: torchvision.transforms.RandomCrop
        params:
          size: 1024

    # NOTE use enough shards to avoid empty validation loops in workers
    validation:
      shards: '{17280..17535}.tar -'
      shuffle: 0
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 1024
          interpolation: 3
      - target: torchvision.transforms.CenterCrop
        params:
          size: 1024


lightning:
  find_unused_parameters: False

  modelcheckpoint:
    params:
      every_n_train_steps: 2000

  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 2000
        max_images: 2
        increase_log_steps: False
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          inpaint: False
          plot_progressive_rows: False
          plot_diffusion_rows: False
          N: 2
          unconditional_guidance_scale: 5.0
          unconditional_guidance_label: [""]

  trainer:
    benchmark: True
    val_check_interval: 5000000
    num_sanity_val_steps: 0
    accumulate_grad_batches: 4
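Per-device batches are tiny at 1024px (batch_size: 3), so gradient accumulation (accumulate_grad_batches: 4) and data parallelism do the heavy lifting; the launchers in this codebase also commonly scale base_learning_rate by the same product. Rough arithmetic, with a hypothetical cluster layout that is not taken from the config:

# Effective batch per optimizer step for the 1024px config above.
batch_size = 3
accumulate_grad_batches = 4
n_gpus, n_nodes = 8, 4                     # hypothetical cluster layout
effective_batch = batch_size * accumulate_grad_batches * n_gpus * n_nodes
print(effective_batch)                     # 384 samples per optimizer step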
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-multinode-clip-encoder-f16-256-pretraining.yaml
ADDED
@@ -0,0 +1,127 @@
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.001
    linear_end: 0.015
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 16
    channels: 16
    cond_stage_trainable: false   # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.22765929 # magic number

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 16 # not really needed
        in_channels: 16
        out_channels: 16
        model_channels: 320 # TODO: scale model here
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 16
        monitor: val/rec_loss
        ckpt_path: "models/first_stage_models/kl-f16/model.ckpt"
        ddconfig:
          double_z: True
          z_channels: 16
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [ 1,1,2,2,4 ]  # num_down = len(ch_mult)-1
          num_res_blocks: 2
          attn_resolutions: [ 16 ]
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder


data:
  target: ldm.data.laion.WebDataModuleFromConfig
  params:
    tar_base: "pipe:aws s3 cp s3://s-datasets/laion5b/laion2B-data/"
    batch_size: 55
    num_workers: 4
    multinode: True
    min_size: 256 # TODO: experiment. Note: for 2B, images are stored at max 384 resolution
    train:
      shards: '{000000..231317}.tar -'
      shuffle: 10000
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 256
          interpolation: 3
      - target: torchvision.transforms.RandomCrop
        params:
          size: 256

    # NOTE use enough shards to avoid empty validation loops in workers
    validation:
      shards: '{231318..231349}.tar -'
      shuffle: 0
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 256
          interpolation: 3
      - target: torchvision.transforms.CenterCrop
        params:
          size: 256


lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 4
        increase_log_steps: False
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          inpaint: False
          plot_progressive_rows: False
          plot_diffusion_rows: False
          N: 4
          unconditional_guidance_scale: 3.0
          unconditional_guidance_label: [""]

  trainer:
    benchmark: True
    val_check_interval: 5000000 # really sorry
    num_sanity_val_steps: 0
    accumulate_grad_batches: 2
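The "magic number" scale_factor (0.22765929 in the f16 configs, 0.18215 in the kl-f8 ones) is, to the best of my understanding, a constant the first-stage latents are multiplied by so their standard deviation is roughly 1 before diffusion; the value itself is estimated from data, not derived in these files. Illustration only, with a stand-in latent rather than a real encoder:

# Sketch: latents are rescaled at encode time and the factor is divided out
# again at decode time (assumed behaviour; stand-in tensor, not a real model).
import torch

scale_factor = 0.22765929
raw_latent = torch.randn(2, 16, 16, 16) / scale_factor   # pretend encoder output
z = scale_factor * raw_latent                             # what diffusion sees
print(z.std())                                            # ~1.0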
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-multinode-clip-encoder-f16-768-laion-hr-inference.yaml
ADDED
@@ -0,0 +1,65 @@
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.001
    linear_end: 0.015
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 48
    channels: 16
    cond_stage_trainable: false
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.22765929 # magic number

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 48
        in_channels: 16
        out_channels: 16
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 16
        monitor: val/rec_loss
        ddconfig:
          double_z: True
          z_channels: 16
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [ 1,1,2,2,4 ]  # num_down = len(ch_mult)-1
          num_res_blocks: 2
          attn_resolutions: [ 16 ]
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-multinode-clip-encoder-f16-768-laion-hr.yaml
ADDED
@@ -0,0 +1,133 @@
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.001
    linear_end: 0.015
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 48
    channels: 16
    cond_stage_trainable: false   # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.22765929 # magic number

    ckpt_path: "/home/mchorse/stable-diffusion-ckpts/768f16-2022-06-23-pruned.ckpt"

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 48 # not really needed
        in_channels: 16
        out_channels: 16
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 16
        monitor: val/rec_loss
        ddconfig:
          double_z: True
          z_channels: 16
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [ 1,1,2,2,4 ]  # num_down = len(ch_mult)-1
          num_res_blocks: 2
          attn_resolutions: [ 16 ]
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder


data:
  target: ldm.data.laion.WebDataModuleFromConfig
  params:
    tar_base: "pipe:aws s3 cp s3://s-datasets/laion-high-resolution/"
    batch_size: 6
    num_workers: 4
    multinode: True
    train:
      shards: '{00000..17279}.tar -'
      shuffle: 10000
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 768
          interpolation: 3
      - target: torchvision.transforms.RandomCrop
        params:
          size: 768

    # NOTE use enough shards to avoid empty validation loops in workers
    validation:
      shards: '{17280..17535}.tar -'
      shuffle: 0
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 768
          interpolation: 3
      - target: torchvision.transforms.CenterCrop
        params:
          size: 768


lightning:
  find_unused_parameters: False

  modelcheckpoint:
    params:
      every_n_train_steps: 5000

  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 4
        increase_log_steps: False
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          inpaint: False
          plot_progressive_rows: False
          plot_diffusion_rows: False
          N: 4
          unconditional_guidance_scale: 3.0
          unconditional_guidance_label: [""]

  trainer:
    benchmark: True
    val_check_interval: 5000000
    num_sanity_val_steps: 0
    accumulate_grad_batches: 2
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-multinode-clip-encoder-f16-768.yaml
ADDED
@@ -0,0 +1,130 @@
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.001
    linear_end: 0.015
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 48
    channels: 16
    cond_stage_trainable: false   # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.22765929 # magic number

    ckpt_path: "/home/mchorse/stable-diffusion-ckpts/256f16-2022-06-15-216k-pruned.ckpt"

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 48 # not really needed
        in_channels: 16
        out_channels: 16
        model_channels: 320 # TODO: scale model here
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 16
        monitor: val/rec_loss
        ddconfig:
          double_z: True
          z_channels: 16
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [ 1,1,2,2,4 ]  # num_down = len(ch_mult)-1
          num_res_blocks: 2
          attn_resolutions: [ 16 ]
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder


data:
  target: ldm.data.laion.WebDataModuleFromConfig
  params:
    tar_base: "pipe:aws s3 cp s3://s-datasets/laion5b/laion2B-data/"
    batch_size: 6
    num_workers: 4
    multinode: True
    min_size: 384 # TODO: experiment. Note: for 2B, images are stored at max 384 resolution
    train:
      shards: '{000000..231317}.tar -'
      shuffle: 10000
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 768
          interpolation: 3
      - target: torchvision.transforms.RandomCrop
        params:
          size: 768

    # NOTE use enough shards to avoid empty validation loops in workers
    validation:
      shards: '{231318..231349}.tar -'
      shuffle: 0
      image_key: jpg
      image_transforms:
      - target: torchvision.transforms.Resize
        params:
          size: 768
          interpolation: 3
      - target: torchvision.transforms.CenterCrop
        params:
          size: 768


lightning:
  find_unused_parameters: False

  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 5000
        max_images: 4
        increase_log_steps: False
        log_first_step: False
        log_images_kwargs:
          use_ema_scope: False
          inpaint: False
          plot_progressive_rows: False
          plot_diffusion_rows: False
          N: 4
          unconditional_guidance_scale: 3.0
          unconditional_guidance_label: [""]

  trainer:
    benchmark: True
    val_check_interval: 5000000
    num_sanity_val_steps: 0
    accumulate_grad_batches: 2
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-t5-encoder-dev.yaml
ADDED
@@ -0,0 +1,128 @@
1 |
+
model:
|
2 |
+
base_learning_rate: 1.0e-04
|
3 |
+
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
4 |
+
params:
|
5 |
+
linear_start: 0.00085
|
6 |
+
linear_end: 0.0120
|
7 |
+
num_timesteps_cond: 1
|
8 |
+
log_every_t: 200
|
9 |
+
timesteps: 1000
|
10 |
+
first_stage_key: "jpg"
|
11 |
+
cond_stage_key: "txt"
|
12 |
+
image_size: 32
|
13 |
+
channels: 4
|
14 |
+
cond_stage_trainable: false # Note: different from the one we trained before
|
15 |
+
conditioning_key: crossattn
|
16 |
+
monitor: val/loss_simple_ema
|
17 |
+
scale_factor: 0.18215
|
18 |
+
|
19 |
+
scheduler_config: # 10000 warmup steps
|
20 |
+
target: ldm.lr_scheduler.LambdaLinearScheduler
|
21 |
+
params:
|
22 |
+
warm_up_steps: [ 10000 ]
|
23 |
+
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
|
24 |
+
f_start: [ 1.e-6 ]
|
25 |
+
f_max: [ 1. ]
|
26 |
+
f_min: [ 1. ]
|
27 |
+
|
28 |
+
unet_config:
|
29 |
+
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
30 |
+
params:
|
31 |
+
image_size: 32
|
32 |
+
in_channels: 4
|
33 |
+
out_channels: 4
|
34 |
+
model_channels: 320
|
35 |
+
attention_resolutions: [ 4, 2, 1 ]
|
36 |
+
num_res_blocks: 2
|
37 |
+
channel_mult: [ 1, 2, 4, 4 ]
|
38 |
+
num_heads: 8
|
39 |
+
use_spatial_transformer: True
|
40 |
+
transformer_depth: 1
|
41 |
+
context_dim: 2048
|
42 |
+
use_checkpoint: True
|
43 |
+
legacy: False
|
44 |
+
|
45 |
+
first_stage_config:
|
46 |
+
target: ldm.models.autoencoder.AutoencoderKL
|
47 |
+
params:
|
48 |
+
embed_dim: 4
|
49 |
+
monitor: val/rec_loss
|
50 |
+
ckpt_path: "models/first_stage_models/kl-f8/model.ckpt"
|
51 |
+
ddconfig:
|
52 |
+
double_z: true
|
53 |
+
z_channels: 4
|
54 |
+
resolution: 256
|
55 |
+
in_channels: 3
|
56 |
+
out_ch: 3
|
57 |
+
ch: 128
|
58 |
+
ch_mult:
|
59 |
+
- 1
|
60 |
+
- 2
|
61 |
+
- 4
|
62 |
+
- 4
|
63 |
+
num_res_blocks: 2
|
64 |
+
attn_resolutions: []
|
65 |
+
dropout: 0.0
|
66 |
+
lossconfig:
|
67 |
+
target: torch.nn.Identity
|
68 |
+
|
69 |
+
cond_stage_config:
|
70 |
+
target: ldm.modules.encoders.modules.FrozenT5Embedder
|
71 |
+
params:
|
72 |
+
version: "google/t5-v1_1-xl"
|
73 |
+
|
74 |
+
|
75 |
+
data:
|
76 |
+
target: ldm.data.laion.WebDataModuleFromConfig
|
77 |
+
params:
|
78 |
+
tar_base: "pipe:aws s3 cp s3://s-datasets/laion5b/laion2B-data/"
|
79 |
+
batch_size: 40
|
80 |
+
num_workers: 4
|
81 |
+
multinode: False
|
82 |
+
train:
|
83 |
+
shards: '{000000..231317}.tar -'
|
84 |
+
shuffle: 10000
|
85 |
+
image_key: jpg
|
86 |
+
image_transforms:
|
87 |
+
- target: torchvision.transforms.Resize
|
88 |
+
params:
|
89 |
+
size: 256
|
90 |
+
interpolation: 3
|
91 |
+
- target: torchvision.transforms.RandomCrop
|
92 |
+
params:
|
93 |
+
size: 256
|
94 |
+
|
95 |
+
# NOTE use enough shards to avoid empty validation loops in workers
|
96 |
+
validation:
|
97 |
+
shards: '{231318..231349}.tar -'
|
98 |
+
shuffle: 0
|
99 |
+
image_key: jpg
|
100 |
+
image_transforms:
|
101 |
+
- target: torchvision.transforms.Resize
|
102 |
+
params:
|
103 |
+
size: 256
|
104 |
+
interpolation: 3
|
105 |
+
- target: torchvision.transforms.CenterCrop
|
106 |
+
params:
|
107 |
+
size: 256
|
108 |
+
|
109 |
+
|
110 |
+
lightning:
|
111 |
+
callbacks:
|
112 |
+
image_logger:
|
113 |
+
target: main.ImageLogger
|
114 |
+
params:
|
115 |
+
batch_frequency: 5000
|
116 |
+
max_images: 8
|
117 |
+
increase_log_steps: False
|
118 |
+
log_first_step: False
|
119 |
+
|
120 |
+
|
121 |
+
trainer:
|
122 |
+
#replace_sampler_ddp: False
|
123 |
+
benchmark: True
|
124 |
+
val_check_interval: 50000
|
125 |
+
num_sanity_val_steps: 0
|
126 |
+
|
127 |
+
|
128 |
+
|
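Every block in these configs follows the same target/params convention: the dotted path under target is imported and the resulting class is called with the params mapping as keyword arguments (the repo's own helper for this is ldm.util.instantiate_from_config). A minimal stand-in, assuming plain dict configs, looks like this:

import importlib

def instantiate_from_config(config: dict):
    # "pkg.module.ClassName" -> import pkg.module, then ClassName(**params)
    module_path, cls_name = config["target"].rsplit(".", 1)
    cls = getattr(importlib.import_module(module_path), cls_name)
    return cls(**config.get("params", {}))

# e.g. the cond_stage_config block above resolves to
# ldm.modules.encoders.modules.FrozenT5Embedder(version="google/t5-v1_1-xl")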
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-upscale-clip-encoder-f16-1024.yaml
ADDED
@@ -0,0 +1,170 @@
1 |
+
model:
|
2 |
+
base_learning_rate: 5.0e-05
|
3 |
+
target: ldm.models.diffusion.ddpm.LatentUpscaleDiffusion
|
4 |
+
params:
|
5 |
+
low_scale_key: "lr"
|
6 |
+
linear_start: 0.001
|
7 |
+
linear_end: 0.015
|
8 |
+
num_timesteps_cond: 1
|
9 |
+
log_every_t: 200
|
10 |
+
timesteps: 1000
|
11 |
+
first_stage_key: "jpg"
|
12 |
+
cond_stage_key: "txt"
|
13 |
+
image_size: 64
|
14 |
+
channels: 16
|
15 |
+
cond_stage_trainable: false
|
16 |
+
conditioning_key: "hybrid-adm"
|
17 |
+
monitor: val/loss_simple_ema
|
18 |
+
scale_factor: 0.22765929 # magic number
|
19 |
+
|
20 |
+
low_scale_config:
|
21 |
+
target: ldm.modules.encoders.modules.LowScaleEncoder
|
22 |
+
params:
|
23 |
+
scale_factor: 0.18215
|
24 |
+
linear_start: 0.00085
|
25 |
+
linear_end: 0.0120
|
26 |
+
timesteps: 1000
|
27 |
+
max_noise_level: 100
|
28 |
+
output_size: 64
|
29 |
+
model_config:
|
30 |
+
target: ldm.models.autoencoder.AutoencoderKL
|
31 |
+
params:
|
32 |
+
embed_dim: 4
|
33 |
+
monitor: val/rec_loss
|
34 |
+
ckpt_path: "models/first_stage_models/kl-f8/model.ckpt"
|
35 |
+
ddconfig:
|
36 |
+
double_z: true
|
37 |
+
z_channels: 4
|
38 |
+
resolution: 256
|
39 |
+
in_channels: 3
|
40 |
+
out_ch: 3
|
41 |
+
ch: 128
|
42 |
+
ch_mult:
|
43 |
+
- 1
|
44 |
+
- 2
|
45 |
+
- 4
|
46 |
+
- 4
|
47 |
+
num_res_blocks: 2
|
48 |
+
attn_resolutions: [ ]
|
49 |
+
dropout: 0.0
|
50 |
+
lossconfig:
|
51 |
+
target: torch.nn.Identity
|
52 |
+
|
53 |
+
scheduler_config: # 10000 warmup steps
|
54 |
+
target: ldm.lr_scheduler.LambdaLinearScheduler
|
55 |
+
params:
|
56 |
+
warm_up_steps: [ 10000 ]
|
57 |
+
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
|
58 |
+
f_start: [ 1.e-6 ]
|
59 |
+
f_max: [ 1. ]
|
60 |
+
f_min: [ 1. ]
|
61 |
+
|
62 |
+
unet_config:
|
63 |
+
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
64 |
+
params:
|
65 |
+
num_classes: 1000 # timesteps for noise conditioning
|
66 |
+
image_size: 64 # not really needed
|
67 |
+
in_channels: 20
|
68 |
+
out_channels: 16
|
69 |
+
model_channels: 96
|
70 |
+
attention_resolutions: [ 8, 4, 2 ] # -> at 32, 16, 8
|
71 |
+
num_res_blocks: 2
|
72 |
+
channel_mult: [ 1, 2, 4, 8, 8 ]
|
73 |
+
# -> res, ds: (64, 1), (32, 2), (16, 4), (8, 8), (4, 16)
|
74 |
+
num_heads: 8
|
75 |
+
use_spatial_transformer: True
|
76 |
+
transformer_depth: 1
|
77 |
+
context_dim: 768
|
78 |
+
use_checkpoint: True
|
79 |
+
legacy: False
|
80 |
+
|
81 |
+
first_stage_config:
|
82 |
+
target: ldm.models.autoencoder.AutoencoderKL
|
83 |
+
params:
|
84 |
+
embed_dim: 16
|
85 |
+
monitor: val/rec_loss
|
86 |
+
ckpt_path: "models/first_stage_models/kl-f16/model.ckpt"
|
87 |
+
ddconfig:
|
88 |
+
double_z: True
|
89 |
+
z_channels: 16
|
90 |
+
resolution: 256
|
91 |
+
in_channels: 3
|
92 |
+
out_ch: 3
|
93 |
+
ch: 128
|
94 |
+
ch_mult: [ 1,1,2,2,4 ] # num_down = len(ch_mult)-1
|
95 |
+
num_res_blocks: 2
|
96 |
+
attn_resolutions: [ 16 ]
|
97 |
+
dropout: 0.0
|
98 |
+
lossconfig:
|
99 |
+
target: torch.nn.Identity
|
100 |
+
|
101 |
+
cond_stage_config:
|
102 |
+
target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
|
103 |
+
|
104 |
+
|
105 |
+
data:
|
106 |
+
target: ldm.data.laion.WebDataModuleFromConfig
|
107 |
+
params:
|
108 |
+
tar_base: "pipe:aws s3 cp s3://s-datasets/laion-high-resolution/"
|
109 |
+
batch_size: 10
|
110 |
+
num_workers: 4
|
111 |
+
train:
|
112 |
+
shards: '{00000..17279}.tar -'
|
113 |
+
shuffle: 10000
|
114 |
+
image_key: jpg
|
115 |
+
image_transforms:
|
116 |
+
- target: torchvision.transforms.Resize
|
117 |
+
params:
|
118 |
+
size: 1024
|
119 |
+
interpolation: 3
|
120 |
+
- target: torchvision.transforms.RandomCrop
|
121 |
+
params:
|
122 |
+
size: 1024
|
123 |
+
postprocess:
|
124 |
+
target: ldm.data.laion.AddLR
|
125 |
+
params:
|
126 |
+
factor: 4
|
127 |
+
|
128 |
+
# NOTE use enough shards to avoid empty validation loops in workers
|
129 |
+
validation:
|
130 |
+
shards: '{17280..17535}.tar -'
|
131 |
+
shuffle: 0
|
132 |
+
image_key: jpg
|
133 |
+
image_transforms:
|
134 |
+
- target: torchvision.transforms.Resize
|
135 |
+
params:
|
136 |
+
size: 1024
|
137 |
+
interpolation: 3
|
138 |
+
- target: torchvision.transforms.CenterCrop
|
139 |
+
params:
|
140 |
+
size: 1024
|
141 |
+
postprocess:
|
142 |
+
target: ldm.data.laion.AddLR
|
143 |
+
params:
|
144 |
+
factor: 4
|
145 |
+
|
146 |
+
lightning:
|
147 |
+
find_unused_parameters: False
|
148 |
+
|
149 |
+
callbacks:
|
150 |
+
image_logger:
|
151 |
+
target: main.ImageLogger
|
152 |
+
params:
|
153 |
+
batch_frequency: 1000
|
154 |
+
max_images: 4
|
155 |
+
increase_log_steps: False
|
156 |
+
log_first_step: False
|
157 |
+
log_images_kwargs:
|
158 |
+
use_ema_scope: False
|
159 |
+
inpaint: False
|
160 |
+
plot_progressive_rows: False
|
161 |
+
plot_diffusion_rows: False
|
162 |
+
N: 4
|
163 |
+
unconditional_guidance_scale: 3.0
|
164 |
+
unconditional_guidance_label: [""]
|
165 |
+
|
166 |
+
trainer:
|
167 |
+
benchmark: True
|
168 |
+
val_check_interval: 5000000 # really sorry
|
169 |
+
num_sanity_val_steps: 0
|
170 |
+
accumulate_grad_batches: 4
|
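The upscaling config above feeds LatentUpscaleDiffusion a low-resolution copy of each image under low_scale_key: "lr", produced by the ldm.data.laion.AddLR postprocess with factor: 4. A hedged sketch of what such a postprocess step plausibly does (not the AddLR source; the class name and tensor layout here are assumptions):

import torch
import torch.nn.functional as F

class AddLowRes:
    """Attach a downscaled copy of the image under the "lr" key (illustrative only)."""
    def __init__(self, factor: int = 4):
        self.factor = factor

    def __call__(self, sample: dict) -> dict:
        img = sample["jpg"]                        # assumed CHW float tensor at this point
        lr = F.interpolate(img.unsqueeze(0), scale_factor=1.0 / self.factor,
                           mode="bicubic", align_corners=False)
        sample["lr"] = lr.squeeze(0)
        return sample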
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-v2-clip-encoder-improved_aesthetics-256.yaml
ADDED
@@ -0,0 +1,137 @@
1 |
+
model:
|
2 |
+
base_learning_rate: 8.e-05
|
3 |
+
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
4 |
+
params:
|
5 |
+
linear_start: 0.00085
|
6 |
+
linear_end: 0.0120
|
7 |
+
num_timesteps_cond: 1
|
8 |
+
log_every_t: 200
|
9 |
+
timesteps: 1000
|
10 |
+
first_stage_key: "jpg"
|
11 |
+
cond_stage_key: "txt"
|
12 |
+
image_size: 32
|
13 |
+
channels: 4
|
14 |
+
cond_stage_trainable: false # Note: different from the one we trained before
|
15 |
+
conditioning_key: crossattn
|
16 |
+
monitor: val/loss_simple_ema
|
17 |
+
scale_factor: 0.18215
|
18 |
+
|
19 |
+
scheduler_config: # 10000 warmup steps
|
20 |
+
target: ldm.lr_scheduler.LambdaLinearScheduler
|
21 |
+
params:
|
22 |
+
warm_up_steps: [ 10000 ]
|
23 |
+
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
|
24 |
+
f_start: [ 1.e-6 ]
|
25 |
+
f_max: [ 1. ]
|
26 |
+
f_min: [ 1. ]
|
27 |
+
|
28 |
+
unet_config:
|
29 |
+
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
30 |
+
params:
|
31 |
+
image_size: 32 # unused
|
32 |
+
in_channels: 4
|
33 |
+
out_channels: 4
|
34 |
+
model_channels: 416
|
35 |
+
attention_resolutions: [ 4, 2, 1 ]
|
36 |
+
num_res_blocks: [ 2, 2, 2, 2 ]
|
37 |
+
channel_mult: [ 1, 2, 4, 4 ]
|
38 |
+
disable_self_attentions: [ False, False, False, False ] # converts the self-attention to a cross-attention layer if true
|
39 |
+
num_heads: 8
|
40 |
+
use_spatial_transformer: True
|
41 |
+
transformer_depth: 1
|
42 |
+
context_dim: 768
|
43 |
+
use_checkpoint: True
|
44 |
+
legacy: False
|
45 |
+
|
46 |
+
first_stage_config:
|
47 |
+
target: ldm.models.autoencoder.AutoencoderKL
|
48 |
+
params:
|
49 |
+
embed_dim: 4
|
50 |
+
monitor: val/rec_loss
|
51 |
+
ckpt_path: "/fsx/stable-diffusion/stable-diffusion/models/first_stage_models/kl-f8/model.ckpt"
|
52 |
+
ddconfig:
|
53 |
+
double_z: true
|
54 |
+
z_channels: 4
|
55 |
+
resolution: 256
|
56 |
+
in_channels: 3
|
57 |
+
out_ch: 3
|
58 |
+
ch: 128
|
59 |
+
ch_mult:
|
60 |
+
- 1
|
61 |
+
- 2
|
62 |
+
- 4
|
63 |
+
- 4
|
64 |
+
num_res_blocks: 2
|
65 |
+
attn_resolutions: []
|
66 |
+
dropout: 0.0
|
67 |
+
lossconfig:
|
68 |
+
target: torch.nn.Identity
|
69 |
+
|
70 |
+
cond_stage_config:
|
71 |
+
target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
|
72 |
+
|
73 |
+
|
74 |
+
data:
|
75 |
+
target: ldm.data.laion.WebDataModuleFromConfig
|
76 |
+
params:
|
77 |
+
tar_base: "__improvedaesthetic__"
|
78 |
+
batch_size: 8
|
79 |
+
num_workers: 4
|
80 |
+
multinode: True
|
81 |
+
train:
|
82 |
+
shards: '{00000..17279}.tar -'
|
83 |
+
shuffle: 10000
|
84 |
+
image_key: jpg
|
85 |
+
image_transforms:
|
86 |
+
- target: torchvision.transforms.Resize
|
87 |
+
params:
|
88 |
+
size: 256
|
89 |
+
interpolation: 3
|
90 |
+
- target: torchvision.transforms.RandomCrop
|
91 |
+
params:
|
92 |
+
size: 256
|
93 |
+
|
94 |
+
# # NOTE use enough shards to avoid empty validation loops in workers
|
95 |
+
validation:
|
96 |
+
shards: '{17280..17535}.tar -'
|
97 |
+
shuffle: 0
|
98 |
+
image_key: jpg
|
99 |
+
image_transforms:
|
100 |
+
- target: torchvision.transforms.Resize
|
101 |
+
params:
|
102 |
+
size: 256
|
103 |
+
interpolation: 3
|
104 |
+
- target: torchvision.transforms.CenterCrop
|
105 |
+
params:
|
106 |
+
size: 256
|
107 |
+
|
108 |
+
|
109 |
+
lightning:
|
110 |
+
find_unused_parameters: false
|
111 |
+
modelcheckpoint:
|
112 |
+
params:
|
113 |
+
every_n_train_steps: 5000
|
114 |
+
callbacks:
|
115 |
+
image_logger:
|
116 |
+
target: main.ImageLogger
|
117 |
+
params:
|
118 |
+
disabled: True
|
119 |
+
batch_frequency: 2500
|
120 |
+
max_images: 4
|
121 |
+
increase_log_steps: False
|
122 |
+
log_first_step: False
|
123 |
+
log_images_kwargs:
|
124 |
+
use_ema_scope: False
|
125 |
+
inpaint: False
|
126 |
+
plot_progressive_rows: False
|
127 |
+
plot_diffusion_rows: False
|
128 |
+
N: 4
|
129 |
+
unconditional_guidance_scale: 3.0
|
130 |
+
unconditional_guidance_label: [""]
|
131 |
+
|
132 |
+
trainer:
|
133 |
+
#replace_sampler_ddp: False
|
134 |
+
benchmark: True
|
135 |
+
val_check_interval: 5000000 # really sorry
|
136 |
+
num_sanity_val_steps: 0
|
137 |
+
accumulate_grad_batches: 1
|
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/txt2img-v2-clip-encoder-improved_aesthetics-512.yaml
ADDED
@@ -0,0 +1,135 @@
1 |
+
model:
|
2 |
+
base_learning_rate: 1.0e-04
|
3 |
+
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
4 |
+
params:
|
5 |
+
linear_start: 0.00085
|
6 |
+
linear_end: 0.0120
|
7 |
+
num_timesteps_cond: 1
|
8 |
+
log_every_t: 200
|
9 |
+
timesteps: 1000
|
10 |
+
first_stage_key: "jpg"
|
11 |
+
cond_stage_key: "txt"
|
12 |
+
image_size: 32
|
13 |
+
channels: 4
|
14 |
+
cond_stage_trainable: false # Note: different from the one we trained before
|
15 |
+
conditioning_key: crossattn
|
16 |
+
monitor: val/loss_simple_ema
|
17 |
+
scale_factor: 0.18215
|
18 |
+
|
19 |
+
scheduler_config: # 10000 warmup steps
|
20 |
+
target: ldm.lr_scheduler.LambdaLinearScheduler
|
21 |
+
params:
|
22 |
+
warm_up_steps: [ 10000 ]
|
23 |
+
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
|
24 |
+
f_start: [ 1.e-6 ]
|
25 |
+
f_max: [ 1. ]
|
26 |
+
f_min: [ 1. ]
|
27 |
+
|
28 |
+
unet_config:
|
29 |
+
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
30 |
+
params:
|
31 |
+
image_size: 32 # unused
|
32 |
+
in_channels: 4
|
33 |
+
out_channels: 4
|
34 |
+
model_channels: 416
|
35 |
+
attention_resolutions: [ 4, 2, 1 ]
|
36 |
+
num_res_blocks: [ 2, 2, 2, 2 ]
|
37 |
+
channel_mult: [ 1, 2, 4, 4 ]
|
38 |
+
disable_self_attentions: [ False, False, False, False ] # converts the self-attention to a cross-attention layer if true
|
39 |
+
num_heads: 8
|
40 |
+
use_spatial_transformer: True
|
41 |
+
transformer_depth: 1
|
42 |
+
context_dim: 768
|
43 |
+
use_checkpoint: True
|
44 |
+
legacy: False
|
45 |
+
|
46 |
+
first_stage_config:
|
47 |
+
target: ldm.models.autoencoder.AutoencoderKL
|
48 |
+
params:
|
49 |
+
embed_dim: 4
|
50 |
+
monitor: val/rec_loss
|
51 |
+
ddconfig:
|
52 |
+
double_z: true
|
53 |
+
z_channels: 4
|
54 |
+
resolution: 256
|
55 |
+
in_channels: 3
|
56 |
+
out_ch: 3
|
57 |
+
ch: 128
|
58 |
+
ch_mult:
|
59 |
+
- 1
|
60 |
+
- 2
|
61 |
+
- 4
|
62 |
+
- 4
|
63 |
+
num_res_blocks: 2
|
64 |
+
attn_resolutions: []
|
65 |
+
dropout: 0.0
|
66 |
+
lossconfig:
|
67 |
+
target: torch.nn.Identity
|
68 |
+
|
69 |
+
cond_stage_config:
|
70 |
+
target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
|
71 |
+
|
72 |
+
|
73 |
+
data:
|
74 |
+
target: ldm.data.laion.WebDataModuleFromConfig
|
75 |
+
params:
|
76 |
+
tar_base: "__improvedaesthetic__"
|
77 |
+
batch_size: 1
|
78 |
+
num_workers: 4
|
79 |
+
multinode: True
|
80 |
+
train:
|
81 |
+
shards: '{00000..17279}.tar -'
|
82 |
+
shuffle: 10000
|
83 |
+
image_key: jpg
|
84 |
+
image_transforms:
|
85 |
+
- target: torchvision.transforms.Resize
|
86 |
+
params:
|
87 |
+
size: 512
|
88 |
+
interpolation: 3
|
89 |
+
- target: torchvision.transforms.RandomCrop
|
90 |
+
params:
|
91 |
+
size: 512
|
92 |
+
|
93 |
+
# # NOTE use enough shards to avoid empty validation loops in workers
|
94 |
+
validation:
|
95 |
+
shards: '{17280..17535}.tar -'
|
96 |
+
shuffle: 0
|
97 |
+
image_key: jpg
|
98 |
+
image_transforms:
|
99 |
+
- target: torchvision.transforms.Resize
|
100 |
+
params:
|
101 |
+
size: 512
|
102 |
+
interpolation: 3
|
103 |
+
- target: torchvision.transforms.CenterCrop
|
104 |
+
params:
|
105 |
+
size: 512
|
106 |
+
|
107 |
+
|
108 |
+
lightning:
|
109 |
+
find_unused_parameters: false
|
110 |
+
modelcheckpoint:
|
111 |
+
params:
|
112 |
+
every_n_train_steps: 5000
|
113 |
+
callbacks:
|
114 |
+
image_logger:
|
115 |
+
target: main.ImageLogger
|
116 |
+
params:
|
117 |
+
batch_frequency: 2500
|
118 |
+
max_images: 2
|
119 |
+
increase_log_steps: False
|
120 |
+
log_first_step: False
|
121 |
+
log_images_kwargs:
|
122 |
+
use_ema_scope: False
|
123 |
+
inpaint: False
|
124 |
+
plot_progressive_rows: False
|
125 |
+
plot_diffusion_rows: False
|
126 |
+
N: 2
|
127 |
+
unconditional_guidance_scale: 3.0
|
128 |
+
unconditional_guidance_label: [""]
|
129 |
+
|
130 |
+
trainer:
|
131 |
+
#replace_sampler_ddp: False
|
132 |
+
benchmark: True
|
133 |
+
val_check_interval: 5000000 # really sorry
|
134 |
+
num_sanity_val_steps: 0
|
135 |
+
accumulate_grad_batches: 2
|
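The image_transforms lists in these data blocks expand to an ordinary torchvision pipeline; interpolation: 3 is the PIL integer code for bicubic. A sketch of the 512-pixel train and validation pipelines described above:

from torchvision import transforms

train_tf = transforms.Compose([
    transforms.Resize(512, interpolation=transforms.InterpolationMode.BICUBIC),
    transforms.RandomCrop(512),
])
val_tf = transforms.Compose([
    transforms.Resize(512, interpolation=transforms.InterpolationMode.BICUBIC),
    transforms.CenterCrop(512),
])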
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/upscaling/upscale-v1-with-f16.yaml
ADDED
@@ -0,0 +1,214 @@
1 |
+
model:
|
2 |
+
base_learning_rate: 5.0e-05
|
3 |
+
target: ldm.models.diffusion.ddpm.LatentUpscaleDiffusion
|
4 |
+
params:
|
5 |
+
low_scale_key: "lr"
|
6 |
+
linear_start: 0.001
|
7 |
+
linear_end: 0.015
|
8 |
+
num_timesteps_cond: 1
|
9 |
+
log_every_t: 200
|
10 |
+
timesteps: 1000
|
11 |
+
first_stage_key: "jpg"
|
12 |
+
cond_stage_key: "txt"
|
13 |
+
image_size: 32
|
14 |
+
channels: 16
|
15 |
+
cond_stage_trainable: false
|
16 |
+
conditioning_key: "hybrid-adm"
|
17 |
+
monitor: val/loss_simple_ema
|
18 |
+
scale_factor: 0.22765929 # magic number
|
19 |
+
|
20 |
+
low_scale_config:
|
21 |
+
target: ldm.modules.encoders.modules.LowScaleEncoder
|
22 |
+
params:
|
23 |
+
scale_factor: 0.18215
|
24 |
+
linear_start: 0.00085
|
25 |
+
linear_end: 0.0120
|
26 |
+
timesteps: 1000
|
27 |
+
max_noise_level: 250
|
28 |
+
output_size: null
|
29 |
+
model_config:
|
30 |
+
target: ldm.models.autoencoder.AutoencoderKL
|
31 |
+
params:
|
32 |
+
embed_dim: 4
|
33 |
+
monitor: val/rec_loss
|
34 |
+
ckpt_path: "/fsx/stable-diffusion/stable-diffusion/models/first_stage_models/kl-f8/model.ckpt"
|
35 |
+
ddconfig:
|
36 |
+
double_z: true
|
37 |
+
z_channels: 4
|
38 |
+
resolution: 256
|
39 |
+
in_channels: 3
|
40 |
+
out_ch: 3
|
41 |
+
ch: 128
|
42 |
+
ch_mult:
|
43 |
+
- 1
|
44 |
+
- 2
|
45 |
+
- 4
|
46 |
+
- 4
|
47 |
+
num_res_blocks: 2
|
48 |
+
attn_resolutions: [ ]
|
49 |
+
dropout: 0.0
|
50 |
+
lossconfig:
|
51 |
+
target: torch.nn.Identity
|
52 |
+
|
53 |
+
scheduler_config: # 10000 warmup steps
|
54 |
+
target: ldm.lr_scheduler.LambdaLinearScheduler
|
55 |
+
params:
|
56 |
+
warm_up_steps: [ 10000 ]
|
57 |
+
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
|
58 |
+
f_start: [ 1.e-6 ]
|
59 |
+
f_max: [ 1. ]
|
60 |
+
f_min: [ 1. ]
|
61 |
+
|
62 |
+
unet_config:
|
63 |
+
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
64 |
+
params:
|
65 |
+
num_classes: 251 # timesteps for noise conditioning
|
66 |
+
image_size: 64 # not really needed
|
67 |
+
in_channels: 20
|
68 |
+
out_channels: 16
|
69 |
+
model_channels: 128
|
70 |
+
attention_resolutions: [ 8, 4, 2 ] # -> at 32, 16, 8
|
71 |
+
num_res_blocks: 2
|
72 |
+
channel_mult: [ 1, 2, 4, 6, 8 ]
|
73 |
+
# -> res, ds: (64, 1), (32, 2), (16, 4), (8, 8), (4, 16)
|
74 |
+
num_heads: 8
|
75 |
+
use_spatial_transformer: True
|
76 |
+
transformer_depth: 1
|
77 |
+
context_dim: 768
|
78 |
+
use_checkpoint: True
|
79 |
+
legacy: False
|
80 |
+
|
81 |
+
first_stage_config:
|
82 |
+
target: ldm.models.autoencoder.AutoencoderKL
|
83 |
+
params:
|
84 |
+
embed_dim: 16
|
85 |
+
monitor: val/rec_loss
|
86 |
+
ckpt_path: "/fsx/stable-diffusion/stable-diffusion/models/first_stage_models/kl-f16/model.ckpt"
|
87 |
+
ddconfig:
|
88 |
+
double_z: True
|
89 |
+
z_channels: 16
|
90 |
+
resolution: 256
|
91 |
+
in_channels: 3
|
92 |
+
out_ch: 3
|
93 |
+
ch: 128
|
94 |
+
ch_mult: [ 1,1,2,2,4 ] # num_down = len(ch_mult)-1
|
95 |
+
num_res_blocks: 2
|
96 |
+
attn_resolutions: [ 16 ]
|
97 |
+
dropout: 0.0
|
98 |
+
lossconfig:
|
99 |
+
target: torch.nn.Identity
|
100 |
+
|
101 |
+
cond_stage_config:
|
102 |
+
target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
|
103 |
+
|
104 |
+
|
105 |
+
#data: # TODO: finetune here later
|
106 |
+
# target: ldm.data.laion.WebDataModuleFromConfig
|
107 |
+
# params:
|
108 |
+
# tar_base: "pipe:aws s3 cp s3://s-datasets/laion-high-resolution/"
|
109 |
+
# batch_size: 10
|
110 |
+
# num_workers: 4
|
111 |
+
# train:
|
112 |
+
# shards: '{00000..17279}.tar -'
|
113 |
+
# shuffle: 10000
|
114 |
+
# image_key: jpg
|
115 |
+
# image_transforms:
|
116 |
+
# - target: torchvision.transforms.Resize
|
117 |
+
# params:
|
118 |
+
# size: 1024
|
119 |
+
# interpolation: 3
|
120 |
+
# - target: torchvision.transforms.RandomCrop
|
121 |
+
# params:
|
122 |
+
# size: 1024
|
123 |
+
# postprocess:
|
124 |
+
# target: ldm.data.laion.AddLR
|
125 |
+
# params:
|
126 |
+
# factor: 2
|
127 |
+
#
|
128 |
+
# # NOTE use enough shards to avoid empty validation loops in workers
|
129 |
+
# validation:
|
130 |
+
# shards: '{17280..17535}.tar -'
|
131 |
+
# shuffle: 0
|
132 |
+
# image_key: jpg
|
133 |
+
# image_transforms:
|
134 |
+
# - target: torchvision.transforms.Resize
|
135 |
+
# params:
|
136 |
+
# size: 1024
|
137 |
+
# interpolation: 3
|
138 |
+
# - target: torchvision.transforms.CenterCrop
|
139 |
+
# params:
|
140 |
+
# size: 1024
|
141 |
+
# postprocess:
|
142 |
+
# target: ldm.data.laion.AddLR
|
143 |
+
# params:
|
144 |
+
# factor: 2
|
145 |
+
|
146 |
+
data:
|
147 |
+
target: ldm.data.laion.WebDataModuleFromConfig
|
148 |
+
params:
|
149 |
+
tar_base: "__improvedaesthetic__"
|
150 |
+
batch_size: 28
|
151 |
+
num_workers: 4
|
152 |
+
multinode: True
|
153 |
+
min_size: 512
|
154 |
+
train:
|
155 |
+
shards: '{00000..17279}.tar -'
|
156 |
+
shuffle: 10000
|
157 |
+
image_key: jpg
|
158 |
+
image_transforms:
|
159 |
+
- target: torchvision.transforms.Resize
|
160 |
+
params:
|
161 |
+
size: 512
|
162 |
+
interpolation: 3
|
163 |
+
- target: torchvision.transforms.RandomCrop
|
164 |
+
params:
|
165 |
+
size: 512
|
166 |
+
postprocess:
|
167 |
+
target: ldm.data.laion.AddLR
|
168 |
+
params:
|
169 |
+
factor: 2
|
170 |
+
|
171 |
+
# NOTE use enough shards to avoid empty validation loops in workers
|
172 |
+
validation:
|
173 |
+
shards: '{17280..17535}.tar -'
|
174 |
+
shuffle: 0
|
175 |
+
image_key: jpg
|
176 |
+
image_transforms:
|
177 |
+
- target: torchvision.transforms.Resize
|
178 |
+
params:
|
179 |
+
size: 512
|
180 |
+
interpolation: 3
|
181 |
+
- target: torchvision.transforms.CenterCrop
|
182 |
+
params:
|
183 |
+
size: 512
|
184 |
+
postprocess:
|
185 |
+
target: ldm.data.laion.AddLR
|
186 |
+
params:
|
187 |
+
factor: 2
|
188 |
+
|
189 |
+
|
190 |
+
lightning:
|
191 |
+
find_unused_parameters: False
|
192 |
+
|
193 |
+
callbacks:
|
194 |
+
image_logger:
|
195 |
+
target: main.ImageLogger
|
196 |
+
params:
|
197 |
+
batch_frequency: 1000
|
198 |
+
max_images: 4
|
199 |
+
increase_log_steps: False
|
200 |
+
log_first_step: False
|
201 |
+
log_images_kwargs:
|
202 |
+
use_ema_scope: False
|
203 |
+
inpaint: False
|
204 |
+
plot_progressive_rows: False
|
205 |
+
plot_diffusion_rows: False
|
206 |
+
N: 4
|
207 |
+
unconditional_guidance_scale: 3.0
|
208 |
+
unconditional_guidance_label: [""]
|
209 |
+
|
210 |
+
trainer:
|
211 |
+
benchmark: True
|
212 |
+
val_check_interval: 5000000 # really sorry
|
213 |
+
num_sanity_val_steps: 0
|
214 |
+
accumulate_grad_batches: 2
|
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/v1-inference.yaml
ADDED
@@ -0,0 +1,69 @@
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
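A typical way to use v1-inference.yaml is to load it with OmegaConf, instantiate the model block, and then load a trained checkpoint. This is a usage sketch: the checkpoint filename is a placeholder, and strict=False is used because the inference config omits training-only keys.

import torch
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config

config = OmegaConf.load("configs/stable-diffusion/v1-inference.yaml")
model = instantiate_from_config(config.model)          # builds the LatentDiffusion model
state = torch.load("sd-v1.ckpt", map_location="cpu")   # placeholder checkpoint path
model.load_state_dict(state.get("state_dict", state), strict=False)
model.eval()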
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/v1_improvedaesthetics.yaml
ADDED
@@ -0,0 +1,135 @@
1 |
+
model:
|
2 |
+
base_learning_rate: 1.0e-04
|
3 |
+
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
4 |
+
params:
|
5 |
+
linear_start: 0.00085
|
6 |
+
linear_end: 0.0120
|
7 |
+
num_timesteps_cond: 1
|
8 |
+
log_every_t: 200
|
9 |
+
timesteps: 1000
|
10 |
+
first_stage_key: "jpg"
|
11 |
+
cond_stage_key: "txt"
|
12 |
+
image_size: 64
|
13 |
+
channels: 4
|
14 |
+
cond_stage_trainable: false # Note: different from the one we trained before
|
15 |
+
conditioning_key: crossattn
|
16 |
+
monitor: val/loss_simple_ema
|
17 |
+
scale_factor: 0.18215
|
18 |
+
|
19 |
+
scheduler_config: # 10000 warmup steps
|
20 |
+
target: ldm.lr_scheduler.LambdaLinearScheduler
|
21 |
+
params:
|
22 |
+
warm_up_steps: [ 1 ] # NOTE for resuming. use 10000 if starting from scratch
|
23 |
+
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
|
24 |
+
f_start: [ 1.e-6 ]
|
25 |
+
f_max: [ 1. ]
|
26 |
+
f_min: [ 1. ]
|
27 |
+
|
28 |
+
unet_config:
|
29 |
+
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
30 |
+
params:
|
31 |
+
image_size: 32 # unused
|
32 |
+
in_channels: 4
|
33 |
+
out_channels: 4
|
34 |
+
model_channels: 320
|
35 |
+
attention_resolutions: [ 4, 2, 1 ]
|
36 |
+
num_res_blocks: 2
|
37 |
+
channel_mult: [ 1, 2, 4, 4 ]
|
38 |
+
num_heads: 8
|
39 |
+
use_spatial_transformer: True
|
40 |
+
transformer_depth: 1
|
41 |
+
context_dim: 768
|
42 |
+
use_checkpoint: True
|
43 |
+
legacy: False
|
44 |
+
|
45 |
+
first_stage_config:
|
46 |
+
target: ldm.models.autoencoder.AutoencoderKL
|
47 |
+
params:
|
48 |
+
embed_dim: 4
|
49 |
+
monitor: val/rec_loss
|
50 |
+
ddconfig:
|
51 |
+
double_z: true
|
52 |
+
z_channels: 4
|
53 |
+
resolution: 256
|
54 |
+
in_channels: 3
|
55 |
+
out_ch: 3
|
56 |
+
ch: 128
|
57 |
+
ch_mult:
|
58 |
+
- 1
|
59 |
+
- 2
|
60 |
+
- 4
|
61 |
+
- 4
|
62 |
+
num_res_blocks: 2
|
63 |
+
attn_resolutions: []
|
64 |
+
dropout: 0.0
|
65 |
+
lossconfig:
|
66 |
+
target: torch.nn.Identity
|
67 |
+
|
68 |
+
cond_stage_config:
|
69 |
+
target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
|
70 |
+
|
71 |
+
|
72 |
+
data:
|
73 |
+
target: ldm.data.laion.WebDataModuleFromConfig
|
74 |
+
params:
|
75 |
+
tar_base: "__improvedaesthetic__"
|
76 |
+
batch_size: 4
|
77 |
+
num_workers: 4
|
78 |
+
multinode: True
|
79 |
+
train:
|
80 |
+
shards: '{00000..17279}.tar -'
|
81 |
+
shuffle: 10000
|
82 |
+
image_key: jpg
|
83 |
+
image_transforms:
|
84 |
+
- target: torchvision.transforms.Resize
|
85 |
+
params:
|
86 |
+
size: 512
|
87 |
+
interpolation: 3
|
88 |
+
- target: torchvision.transforms.RandomCrop
|
89 |
+
params:
|
90 |
+
size: 512
|
91 |
+
|
92 |
+
# NOTE use enough shards to avoid empty validation loops in workers
|
93 |
+
validation:
|
94 |
+
shards: '{17280..17535}.tar -'
|
95 |
+
shuffle: 0
|
96 |
+
image_key: jpg
|
97 |
+
image_transforms:
|
98 |
+
- target: torchvision.transforms.Resize
|
99 |
+
params:
|
100 |
+
size: 512
|
101 |
+
interpolation: 3
|
102 |
+
- target: torchvision.transforms.CenterCrop
|
103 |
+
params:
|
104 |
+
size: 512
|
105 |
+
|
106 |
+
|
107 |
+
lightning:
|
108 |
+
find_unused_parameters: False
|
109 |
+
|
110 |
+
modelcheckpoint:
|
111 |
+
params:
|
112 |
+
every_n_train_steps: 5000
|
113 |
+
|
114 |
+
callbacks:
|
115 |
+
image_logger:
|
116 |
+
target: main.ImageLogger
|
117 |
+
params:
|
118 |
+
batch_frequency: 5000
|
119 |
+
max_images: 4
|
120 |
+
increase_log_steps: False
|
121 |
+
log_first_step: False
|
122 |
+
log_images_kwargs:
|
123 |
+
use_ema_scope: False
|
124 |
+
inpaint: False
|
125 |
+
plot_progressive_rows: False
|
126 |
+
plot_diffusion_rows: False
|
127 |
+
N: 4
|
128 |
+
unconditional_guidance_scale: 3.0
|
129 |
+
unconditional_guidance_label: [""]
|
130 |
+
|
131 |
+
trainer:
|
132 |
+
benchmark: True
|
133 |
+
val_check_interval: 5000000 # really sorry
|
134 |
+
num_sanity_val_steps: 0
|
135 |
+
accumulate_grad_batches: 2
|
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/v1_laionhr.yaml
ADDED
@@ -0,0 +1,135 @@
1 |
+
model:
|
2 |
+
base_learning_rate: 1.0e-04
|
3 |
+
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
4 |
+
params:
|
5 |
+
linear_start: 0.00085
|
6 |
+
linear_end: 0.0120
|
7 |
+
num_timesteps_cond: 1
|
8 |
+
log_every_t: 200
|
9 |
+
timesteps: 1000
|
10 |
+
first_stage_key: "jpg"
|
11 |
+
cond_stage_key: "txt"
|
12 |
+
image_size: 64
|
13 |
+
channels: 4
|
14 |
+
cond_stage_trainable: false # Note: different from the one we trained before
|
15 |
+
conditioning_key: crossattn
|
16 |
+
monitor: val/loss_simple_ema
|
17 |
+
scale_factor: 0.18215
|
18 |
+
|
19 |
+
scheduler_config: # 10000 warmup steps
|
20 |
+
target: ldm.lr_scheduler.LambdaLinearScheduler
|
21 |
+
params:
|
22 |
+
warm_up_steps: [ 1 ] # NOTE for resuming. use 10000 if starting from scratch
|
23 |
+
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
|
24 |
+
f_start: [ 1.e-6 ]
|
25 |
+
f_max: [ 1. ]
|
26 |
+
f_min: [ 1. ]
|
27 |
+
|
28 |
+
unet_config:
|
29 |
+
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
30 |
+
params:
|
31 |
+
image_size: 32 # unused
|
32 |
+
in_channels: 4
|
33 |
+
out_channels: 4
|
34 |
+
model_channels: 320
|
35 |
+
attention_resolutions: [ 4, 2, 1 ]
|
36 |
+
num_res_blocks: 2
|
37 |
+
channel_mult: [ 1, 2, 4, 4 ]
|
38 |
+
num_heads: 8
|
39 |
+
use_spatial_transformer: True
|
40 |
+
transformer_depth: 1
|
41 |
+
context_dim: 768
|
42 |
+
use_checkpoint: True
|
43 |
+
legacy: False
|
44 |
+
|
45 |
+
first_stage_config:
|
46 |
+
target: ldm.models.autoencoder.AutoencoderKL
|
47 |
+
params:
|
48 |
+
embed_dim: 4
|
49 |
+
monitor: val/rec_loss
|
50 |
+
ddconfig:
|
51 |
+
double_z: true
|
52 |
+
z_channels: 4
|
53 |
+
resolution: 256
|
54 |
+
in_channels: 3
|
55 |
+
out_ch: 3
|
56 |
+
ch: 128
|
57 |
+
ch_mult:
|
58 |
+
- 1
|
59 |
+
- 2
|
60 |
+
- 4
|
61 |
+
- 4
|
62 |
+
num_res_blocks: 2
|
63 |
+
attn_resolutions: []
|
64 |
+
dropout: 0.0
|
65 |
+
lossconfig:
|
66 |
+
target: torch.nn.Identity
|
67 |
+
|
68 |
+
cond_stage_config:
|
69 |
+
target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
|
70 |
+
|
71 |
+
|
72 |
+
data:
|
73 |
+
target: ldm.data.laion.WebDataModuleFromConfig
|
74 |
+
params:
|
75 |
+
tar_base: "pipe:aws s3 cp s3://s-datasets/laion-high-resolution/"
|
76 |
+
batch_size: 4
|
77 |
+
num_workers: 4
|
78 |
+
multinode: True
|
79 |
+
train:
|
80 |
+
shards: '{00000..17279}.tar -'
|
81 |
+
shuffle: 10000
|
82 |
+
image_key: jpg
|
83 |
+
image_transforms:
|
84 |
+
- target: torchvision.transforms.Resize
|
85 |
+
params:
|
86 |
+
size: 512
|
87 |
+
interpolation: 3
|
88 |
+
- target: torchvision.transforms.RandomCrop
|
89 |
+
params:
|
90 |
+
size: 512
|
91 |
+
|
92 |
+
# NOTE use enough shards to avoid empty validation loops in workers
|
93 |
+
validation:
|
94 |
+
shards: '{17280..17535}.tar -'
|
95 |
+
shuffle: 0
|
96 |
+
image_key: jpg
|
97 |
+
image_transforms:
|
98 |
+
- target: torchvision.transforms.Resize
|
99 |
+
params:
|
100 |
+
size: 512
|
101 |
+
interpolation: 3
|
102 |
+
- target: torchvision.transforms.CenterCrop
|
103 |
+
params:
|
104 |
+
size: 512
|
105 |
+
|
106 |
+
|
107 |
+
lightning:
|
108 |
+
find_unused_parameters: False
|
109 |
+
|
110 |
+
modelcheckpoint:
|
111 |
+
params:
|
112 |
+
every_n_train_steps: 5000
|
113 |
+
|
114 |
+
callbacks:
|
115 |
+
image_logger:
|
116 |
+
target: main.ImageLogger
|
117 |
+
params:
|
118 |
+
batch_frequency: 5000
|
119 |
+
max_images: 4
|
120 |
+
increase_log_steps: False
|
121 |
+
log_first_step: False
|
122 |
+
log_images_kwargs:
|
123 |
+
use_ema_scope: False
|
124 |
+
inpaint: False
|
125 |
+
plot_progressive_rows: False
|
126 |
+
plot_diffusion_rows: False
|
127 |
+
N: 4
|
128 |
+
unconditional_guidance_scale: 3.0
|
129 |
+
unconditional_guidance_label: [""]
|
130 |
+
|
131 |
+
trainer:
|
132 |
+
benchmark: True
|
133 |
+
val_check_interval: 5000000 # really sorry
|
134 |
+
num_sanity_val_steps: 0
|
135 |
+
accumulate_grad_batches: 2
|
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/v2_laionhr1024.yaml
ADDED
@@ -0,0 +1,132 @@
1 |
+
model:
|
2 |
+
base_learning_rate: 1.0e-04
|
3 |
+
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
4 |
+
params:
|
5 |
+
linear_start: 0.001
|
6 |
+
linear_end: 0.015
|
7 |
+
num_timesteps_cond: 1
|
8 |
+
log_every_t: 200
|
9 |
+
timesteps: 1000
|
10 |
+
first_stage_key: "jpg"
|
11 |
+
cond_stage_key: "txt"
|
12 |
+
image_size: 64
|
13 |
+
channels: 16
|
14 |
+
cond_stage_trainable: false # Note: different from the one we trained before
|
15 |
+
conditioning_key: crossattn
|
16 |
+
monitor: val/loss_simple_ema
|
17 |
+
scale_factor: 0.22765929 # magic number
|
18 |
+
|
19 |
+
# NOTE disabled for resuming
|
20 |
+
#scheduler_config: # 10000 warmup steps
|
21 |
+
# target: ldm.lr_scheduler.LambdaLinearScheduler
|
22 |
+
# params:
|
23 |
+
# warm_up_steps: [ 10000 ]
|
24 |
+
# cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
|
25 |
+
# f_start: [ 1.e-6 ]
|
26 |
+
# f_max: [ 1. ]
|
27 |
+
# f_min: [ 1. ]
|
28 |
+
|
29 |
+
unet_config:
|
30 |
+
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
31 |
+
params:
|
32 |
+
image_size: 64 # not really needed
|
33 |
+
in_channels: 16
|
34 |
+
out_channels: 16
|
35 |
+
model_channels: 320
|
36 |
+
attention_resolutions: [ 4, 2, 1 ]
|
37 |
+
num_res_blocks: 2
|
38 |
+
channel_mult: [ 1, 2, 4, 4 ]
|
39 |
+
num_heads: 8
|
40 |
+
use_spatial_transformer: True
|
41 |
+
transformer_depth: 1
|
42 |
+
context_dim: 768
|
43 |
+
use_checkpoint: True
|
44 |
+
legacy: False
|
45 |
+
|
46 |
+
first_stage_config:
|
47 |
+
target: ldm.models.autoencoder.AutoencoderKL
|
48 |
+
params:
|
49 |
+
embed_dim: 16
|
50 |
+
monitor: val/rec_loss
|
51 |
+
ddconfig:
|
52 |
+
double_z: True
|
53 |
+
z_channels: 16
|
54 |
+
resolution: 256
|
55 |
+
in_channels: 3
|
56 |
+
out_ch: 3
|
57 |
+
ch: 128
|
58 |
+
ch_mult: [ 1,1,2,2,4 ] # num_down = len(ch_mult)-1
|
59 |
+
num_res_blocks: 2
|
60 |
+
attn_resolutions: [ 16 ]
|
61 |
+
dropout: 0.0
|
62 |
+
lossconfig:
|
63 |
+
target: torch.nn.Identity
|
64 |
+
|
65 |
+
cond_stage_config:
|
66 |
+
target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
|
67 |
+
|
68 |
+
|
69 |
+
data:
|
70 |
+
target: ldm.data.laion.WebDataModuleFromConfig
|
71 |
+
params:
|
72 |
+
tar_base: "pipe:aws s3 cp s3://s-datasets/laion-high-resolution/"
|
73 |
+
batch_size: 3
|
74 |
+
num_workers: 4
|
75 |
+
multinode: True
|
76 |
+
train:
|
77 |
+
shards: '{00000..17279}.tar -'
|
78 |
+
shuffle: 10000
|
79 |
+
image_key: jpg
|
80 |
+
image_transforms:
|
81 |
+
- target: torchvision.transforms.Resize
|
82 |
+
params:
|
83 |
+
size: 1024
|
84 |
+
interpolation: 3
|
85 |
+
- target: torchvision.transforms.RandomCrop
|
86 |
+
params:
|
87 |
+
size: 1024
|
88 |
+
|
89 |
+
# NOTE use enough shards to avoid empty validation loops in workers
|
90 |
+
validation:
|
91 |
+
shards: '{17280..17535}.tar -'
|
92 |
+
shuffle: 0
|
93 |
+
image_key: jpg
|
94 |
+
image_transforms:
|
95 |
+
- target: torchvision.transforms.Resize
|
96 |
+
params:
|
97 |
+
size: 1024
|
98 |
+
interpolation: 3
|
99 |
+
- target: torchvision.transforms.CenterCrop
|
100 |
+
params:
|
101 |
+
size: 1024
|
102 |
+
|
103 |
+
|
104 |
+
lightning:
|
105 |
+
find_unused_parameters: False
|
106 |
+
|
107 |
+
modelcheckpoint:
|
108 |
+
params:
|
109 |
+
every_n_train_steps: 2000
|
110 |
+
|
111 |
+
callbacks:
|
112 |
+
image_logger:
|
113 |
+
target: main.ImageLogger
|
114 |
+
params:
|
115 |
+
batch_frequency: 2000
|
116 |
+
max_images: 2
|
117 |
+
increase_log_steps: False
|
118 |
+
log_first_step: False
|
119 |
+
log_images_kwargs:
|
120 |
+
use_ema_scope: False
|
121 |
+
inpaint: False
|
122 |
+
plot_progressive_rows: False
|
123 |
+
plot_diffusion_rows: False
|
124 |
+
N: 2
|
125 |
+
unconditional_guidance_scale: 5.0
|
126 |
+
unconditional_guidance_label: [""]
|
127 |
+
|
128 |
+
trainer:
|
129 |
+
benchmark: True
|
130 |
+
val_check_interval: 5000000
|
131 |
+
num_sanity_val_steps: 0
|
132 |
+
accumulate_grad_batches: 4
|
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/v2_laionhr1024_2.yaml
ADDED
@@ -0,0 +1,132 @@
1 |
+
model:
|
2 |
+
base_learning_rate: 7.5e-05
|
3 |
+
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
4 |
+
params:
|
5 |
+
linear_start: 0.001
|
6 |
+
linear_end: 0.015
|
7 |
+
num_timesteps_cond: 1
|
8 |
+
log_every_t: 200
|
9 |
+
timesteps: 1000
|
10 |
+
first_stage_key: "jpg"
|
11 |
+
cond_stage_key: "txt"
|
12 |
+
image_size: 64
|
13 |
+
channels: 16
|
14 |
+
cond_stage_trainable: false # Note: different from the one we trained before
|
15 |
+
conditioning_key: crossattn
|
16 |
+
monitor: val/loss_simple_ema
|
17 |
+
scale_factor: 0.22765929 # magic number
|
18 |
+
|
19 |
+
# NOTE disabled for resuming
|
20 |
+
#scheduler_config: # 10000 warmup steps
|
21 |
+
# target: ldm.lr_scheduler.LambdaLinearScheduler
|
22 |
+
# params:
|
23 |
+
# warm_up_steps: [ 10000 ]
|
24 |
+
# cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
|
25 |
+
# f_start: [ 1.e-6 ]
|
26 |
+
# f_max: [ 1. ]
|
27 |
+
# f_min: [ 1. ]
|
28 |
+
|
29 |
+
unet_config:
|
30 |
+
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
31 |
+
params:
|
32 |
+
image_size: 64 # not really needed
|
33 |
+
in_channels: 16
|
34 |
+
out_channels: 16
|
35 |
+
model_channels: 320
|
36 |
+
attention_resolutions: [ 4, 2, 1 ]
|
37 |
+
num_res_blocks: 2
|
38 |
+
channel_mult: [ 1, 2, 4, 4 ]
|
39 |
+
num_heads: 8
|
40 |
+
use_spatial_transformer: True
|
41 |
+
transformer_depth: 1
|
42 |
+
context_dim: 768
|
43 |
+
use_checkpoint: True
|
44 |
+
legacy: False
|
45 |
+
|
46 |
+
first_stage_config:
|
47 |
+
target: ldm.models.autoencoder.AutoencoderKL
|
48 |
+
params:
|
49 |
+
embed_dim: 16
|
50 |
+
monitor: val/rec_loss
|
51 |
+
ddconfig:
|
52 |
+
double_z: True
|
53 |
+
z_channels: 16
|
54 |
+
resolution: 256
|
55 |
+
in_channels: 3
|
56 |
+
out_ch: 3
|
57 |
+
ch: 128
|
58 |
+
ch_mult: [ 1,1,2,2,4 ] # num_down = len(ch_mult)-1
|
59 |
+
num_res_blocks: 2
|
60 |
+
attn_resolutions: [ 16 ]
|
61 |
+
dropout: 0.0
|
62 |
+
lossconfig:
|
63 |
+
target: torch.nn.Identity
|
64 |
+
|
65 |
+
cond_stage_config:
|
66 |
+
target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
|
67 |
+
|
68 |
+
|
69 |
+
data:
|
70 |
+
target: ldm.data.laion.WebDataModuleFromConfig
|
71 |
+
params:
|
72 |
+
tar_base: "pipe:aws s3 cp s3://s-datasets/laion-high-resolution/"
|
73 |
+
batch_size: 3
|
74 |
+
num_workers: 4
|
75 |
+
multinode: True
|
76 |
+
train:
|
77 |
+
shards: '{00000..17279}.tar -'
|
78 |
+
shuffle: 10000
|
79 |
+
image_key: jpg
|
80 |
+
image_transforms:
|
81 |
+
- target: torchvision.transforms.Resize
|
82 |
+
params:
|
83 |
+
size: 1024
|
84 |
+
interpolation: 3
|
85 |
+
- target: torchvision.transforms.RandomCrop
|
86 |
+
params:
|
87 |
+
size: 1024
|
88 |
+
|
89 |
+
# NOTE use enough shards to avoid empty validation loops in workers
|
90 |
+
validation:
|
91 |
+
shards: '{17280..17535}.tar -'
|
92 |
+
shuffle: 0
|
93 |
+
image_key: jpg
|
94 |
+
image_transforms:
|
95 |
+
- target: torchvision.transforms.Resize
|
96 |
+
params:
|
97 |
+
size: 1024
|
98 |
+
interpolation: 3
|
99 |
+
- target: torchvision.transforms.CenterCrop
|
100 |
+
params:
|
101 |
+
size: 1024
|
102 |
+
|
103 |
+
|
104 |
+
lightning:
|
105 |
+
find_unused_parameters: False
|
106 |
+
|
107 |
+
modelcheckpoint:
|
108 |
+
params:
|
109 |
+
every_n_train_steps: 2000
|
110 |
+
|
111 |
+
callbacks:
|
112 |
+
image_logger:
|
113 |
+
target: main.ImageLogger
|
114 |
+
params:
|
115 |
+
batch_frequency: 2000
|
116 |
+
max_images: 2
|
117 |
+
increase_log_steps: False
|
118 |
+
log_first_step: False
|
119 |
+
log_images_kwargs:
|
120 |
+
use_ema_scope: False
|
121 |
+
inpaint: False
|
122 |
+
plot_progressive_rows: False
|
123 |
+
plot_diffusion_rows: False
|
124 |
+
N: 2
|
125 |
+
unconditional_guidance_scale: 5.0
|
126 |
+
unconditional_guidance_label: [""]
|
127 |
+
|
128 |
+
trainer:
|
129 |
+
benchmark: True
|
130 |
+
val_check_interval: 5000000
|
131 |
+
num_sanity_val_steps: 0
|
132 |
+
accumulate_grad_batches: 2
|
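The shards entries use brace expansion: webdataset turns a pattern like '{00000..17279}.tar' into the individual shard names (the trailing ' -' belongs to the aws-cp pipe command, not the pattern). A quick sketch with the braceexpand package:

from braceexpand import braceexpand

train_shards = list(braceexpand("{00000..17279}.tar"))
val_shards = list(braceexpand("{17280..17535}.tar"))
print(len(train_shards), len(val_shards))  # 17280 training shards, 256 validation shards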
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/v2_pretraining.yaml
ADDED
@@ -0,0 +1,131 @@
1 |
+
model:
|
2 |
+
base_learning_rate: 1.0e-04
|
3 |
+
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
4 |
+
params:
|
5 |
+
linear_start: 0.001
|
6 |
+
linear_end: 0.015
|
7 |
+
num_timesteps_cond: 1
|
8 |
+
log_every_t: 200
|
9 |
+
timesteps: 1000
|
10 |
+
first_stage_key: "jpg"
|
11 |
+
cond_stage_key: "txt"
|
12 |
+
image_size: 16
|
13 |
+
channels: 16
|
14 |
+
cond_stage_trainable: false # Note: different from the one we trained before
|
15 |
+
conditioning_key: crossattn
|
16 |
+
monitor: val/loss_simple_ema
|
17 |
+
scale_factor: 0.22765929 # magic number
|
18 |
+
|
19 |
+
scheduler_config: # 10000 warmup steps
|
20 |
+
target: ldm.lr_scheduler.LambdaLinearScheduler
|
21 |
+
params:
|
22 |
+
warm_up_steps: [ 10000 ]
|
23 |
+
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
|
24 |
+
f_start: [ 1.e-6 ]
|
25 |
+
f_max: [ 1. ]
|
26 |
+
f_min: [ 1. ]
|
27 |
+
|
28 |
+
unet_config:
|
29 |
+
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
30 |
+
params:
|
31 |
+
image_size: 16 # not really needed
|
32 |
+
in_channels: 16
|
33 |
+
out_channels: 16
|
34 |
+
model_channels: 320 # TODO: scale model here
|
35 |
+
attention_resolutions: [ 4, 2, 1 ]
|
36 |
+
num_res_blocks: 2
|
37 |
+
channel_mult: [ 1, 2, 4, 4 ]
|
38 |
+
num_heads: 8
|
39 |
+
use_spatial_transformer: True
|
40 |
+
transformer_depth: 1
|
41 |
+
context_dim: 768
|
42 |
+
use_checkpoint: True
|
43 |
+
legacy: False
|
44 |
+
|
45 |
+
first_stage_config:
|
46 |
+
target: ldm.models.autoencoder.AutoencoderKL
|
47 |
+
params:
|
48 |
+
embed_dim: 16
|
49 |
+
monitor: val/rec_loss
|
50 |
+
ckpt_path: "models/first_stage_models/kl-f16/model.ckpt"
|
51 |
+
ddconfig:
|
52 |
+
double_z: True
|
53 |
+
z_channels: 16
|
54 |
+
resolution: 256
|
55 |
+
in_channels: 3
|
56 |
+
out_ch: 3
|
57 |
+
ch: 128
|
58 |
+
ch_mult: [ 1,1,2,2,4 ] # num_down = len(ch_mult)-1
|
59 |
+
num_res_blocks: 2
|
60 |
+
attn_resolutions: [ 16 ]
|
61 |
+
dropout: 0.0
|
62 |
+
lossconfig:
|
63 |
+
target: torch.nn.Identity
|
64 |
+
|
65 |
+
cond_stage_config:
|
66 |
+
target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
|
67 |
+
|
68 |
+
|
69 |
+
data:
|
70 |
+
target: ldm.data.laion.WebDataModuleFromConfig
|
71 |
+
params:
|
72 |
+
tar_base: "pipe:aws s3 cp s3://s-datasets/laion5b/laion2B-data/"
|
73 |
+
batch_size: 55
|
74 |
+
num_workers: 4
|
75 |
+
multinode: True
|
76 |
+
min_size: 256
|
77 |
+
train:
|
78 |
+
shards: '{000000..231317}.tar -'
|
79 |
+
shuffle: 10000
|
80 |
+
image_key: jpg
|
81 |
+
image_transforms:
|
82 |
+
- target: torchvision.transforms.Resize
|
83 |
+
params:
|
84 |
+
size: 256
|
85 |
+
interpolation: 3
|
86 |
+
- target: torchvision.transforms.RandomCrop
|
87 |
+
params:
|
88 |
+
size: 256
|
89 |
+
|
90 |
+
# NOTE use enough shards to avoid empty validation loops in workers
|
91 |
+
validation:
|
92 |
+
shards: '{231318..231349}.tar -'
|
93 |
+
shuffle: 0
|
94 |
+
image_key: jpg
|
95 |
+
image_transforms:
|
96 |
+
- target: torchvision.transforms.Resize
|
97 |
+
params:
|
98 |
+
size: 256
|
99 |
+
interpolation: 3
|
100 |
+
- target: torchvision.transforms.CenterCrop
|
101 |
+
params:
|
102 |
+
size: 256
|
103 |
+
|
104 |
+
|
105 |
+
lightning:
|
106 |
+
find_unused_parameters: false
|
107 |
+
modelcheckpoint:
|
108 |
+
params:
|
109 |
+
every_n_train_steps: 5000
|
110 |
+
callbacks:
|
111 |
+
image_logger:
|
112 |
+
target: main.ImageLogger
|
113 |
+
params:
|
114 |
+
batch_frequency: 5000
|
115 |
+
max_images: 4
|
116 |
+
increase_log_steps: False
|
117 |
+
log_first_step: False
|
118 |
+
log_images_kwargs:
|
119 |
+
use_ema_scope: False
|
120 |
+
inpaint: False
|
121 |
+
plot_progressive_rows: False
|
122 |
+
plot_diffusion_rows: False
|
123 |
+
N: 4
|
124 |
+
unconditional_guidance_scale: 3.0
|
125 |
+
unconditional_guidance_label: [""]
|
126 |
+
|
127 |
+
trainer:
|
128 |
+
benchmark: True
|
129 |
+
val_check_interval: 5000000 # really sorry
|
130 |
+
num_sanity_val_steps: 0
|
131 |
+
accumulate_grad_batches: 1
|
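The base_learning_rate in these configs is usually scaled by the effective batch before training: when LR scaling is enabled, the launcher multiplies it by per-GPU batch size, gradient-accumulation steps, and GPU count. A sketch of that arithmetic for the pretraining config above (the GPU count is an assumption for illustration):

base_learning_rate = 1.0e-04       # model.base_learning_rate above
batch_size = 55                    # data.params.batch_size above
accumulate_grad_batches = 1        # lightning.trainer.accumulate_grad_batches above
num_gpus = 8                       # assumed world size, for illustration only

effective_lr = base_learning_rate * batch_size * accumulate_grad_batches * num_gpus
print(effective_lr)                # 0.044 with these numbers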
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/configs/stable-diffusion/v3_pretraining.yaml
ADDED
@@ -0,0 +1,137 @@
1 |
+
model:
|
2 |
+
base_learning_rate: 8.e-05
|
3 |
+
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
4 |
+
params:
|
5 |
+
linear_start: 0.00085
|
6 |
+
linear_end: 0.0120
|
7 |
+
num_timesteps_cond: 1
|
8 |
+
log_every_t: 200
|
9 |
+
timesteps: 1000
|
10 |
+
first_stage_key: "jpg"
|
11 |
+
cond_stage_key: "txt"
|
12 |
+
image_size: 32
|
13 |
+
channels: 4
|
14 |
+
cond_stage_trainable: false # Note: different from the one we trained before
|
15 |
+
conditioning_key: crossattn
|
16 |
+
monitor: val/loss_simple_ema
|
17 |
+
scale_factor: 0.18215
|
18 |
+
|
19 |
+
scheduler_config: # 10000 warmup steps
|
20 |
+
target: ldm.lr_scheduler.LambdaLinearScheduler
|
21 |
+
params:
|
22 |
+
warm_up_steps: [ 10000 ]
|
23 |
+
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
|
24 |
+
f_start: [ 1.e-6 ]
|
25 |
+
f_max: [ 1. ]
|
26 |
+
f_min: [ 1. ]
|
27 |
+
|
28 |
+
unet_config:
|
29 |
+
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
30 |
+
params:
|
31 |
+
image_size: 32 # unused
|
32 |
+
in_channels: 4
|
33 |
+
out_channels: 4
|
34 |
+
model_channels: 416
|
35 |
+
attention_resolutions: [ 4, 2, 1 ]
|
36 |
+
num_res_blocks: [ 2, 2, 2, 2 ]
|
37 |
+
channel_mult: [ 1, 2, 4, 4 ]
|
38 |
+
disable_self_attentions: [ False, False, False, False ] # converts the self-attention to a cross-attention layer if true
|
39 |
+
num_heads: 8
|
40 |
+
use_spatial_transformer: True
|
41 |
+
transformer_depth: 1
|
42 |
+
context_dim: 768
|
43 |
+
use_checkpoint: True
|
44 |
+
legacy: False
|
45 |
+
|
46 |
+
first_stage_config:
|
47 |
+
target: ldm.models.autoencoder.AutoencoderKL
|
48 |
+
params:
|
49 |
+
embed_dim: 4
|
50 |
+
monitor: val/rec_loss
|
51 |
+
ckpt_path: "/fsx/stable-diffusion/stable-diffusion/models/first_stage_models/kl-f8/model.ckpt"
|
52 |
+
ddconfig:
|
53 |
+
double_z: true
|
54 |
+
z_channels: 4
|
55 |
+
resolution: 256
|
56 |
+
in_channels: 3
|
57 |
+
out_ch: 3
|
58 |
+
ch: 128
|
59 |
+
ch_mult:
|
60 |
+
- 1
|
61 |
+
- 2
|
62 |
+
- 4
|
63 |
+
- 4
|
64 |
+
num_res_blocks: 2
|
65 |
+
attn_resolutions: []
|
66 |
+
dropout: 0.0
|
67 |
+
lossconfig:
|
68 |
+
target: torch.nn.Identity
|
69 |
+
|
70 |
+
cond_stage_config:
|
71 |
+
target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
|
72 |
+
|
73 |
+
|
74 |
+
data:
|
75 |
+
target: ldm.data.laion.WebDataModuleFromConfig
|
76 |
+
params:
|
77 |
+
tar_base: "__improvedaesthetic__"
|
78 |
+
batch_size: 8
|
79 |
+
num_workers: 4
|
80 |
+
multinode: True
|
81 |
+
train:
|
82 |
+
shards: '{00000..17279}.tar -'
|
83 |
+
shuffle: 10000
|
84 |
+
image_key: jpg
|
85 |
+
image_transforms:
|
86 |
+
- target: torchvision.transforms.Resize
|
87 |
+
params:
|
88 |
+
size: 256
|
89 |
+
interpolation: 3
|
90 |
+
- target: torchvision.transforms.RandomCrop
|
91 |
+
params:
|
92 |
+
size: 256
|
93 |
+
|
94 |
+
# # NOTE use enough shards to avoid empty validation loops in workers
|
95 |
+
validation:
|
96 |
+
shards: '{17280..17535}.tar -'
|
97 |
+
shuffle: 0
|
98 |
+
image_key: jpg
|
99 |
+
image_transforms:
|
100 |
+
- target: torchvision.transforms.Resize
|
101 |
+
params:
|
102 |
+
size: 256
|
103 |
+
interpolation: 3
|
104 |
+
- target: torchvision.transforms.CenterCrop
|
105 |
+
params:
|
106 |
+
size: 256
|
107 |
+
|
108 |
+
|
109 |
+
lightning:
|
110 |
+
find_unused_parameters: false
|
111 |
+
modelcheckpoint:
|
112 |
+
params:
|
113 |
+
every_n_train_steps: 5000
|
114 |
+
callbacks:
|
115 |
+
image_logger:
|
116 |
+
target: main.ImageLogger
|
117 |
+
params:
|
118 |
+
disabled: True
|
119 |
+
batch_frequency: 2500
|
120 |
+
max_images: 4
|
121 |
+
increase_log_steps: False
|
122 |
+
log_first_step: False
|
123 |
+
log_images_kwargs:
|
124 |
+
use_ema_scope: False
|
125 |
+
inpaint: False
|
126 |
+
plot_progressive_rows: False
|
127 |
+
plot_diffusion_rows: False
|
128 |
+
N: 4
|
129 |
+
unconditional_guidance_scale: 3.0
|
130 |
+
unconditional_guidance_label: [""]
|
131 |
+
|
132 |
+
trainer:
|
133 |
+
#replace_sampler_ddp: False
|
134 |
+
benchmark: True
|
135 |
+
val_check_interval: 5000000 # really sorry
|
136 |
+
num_sanity_val_steps: 0
|
137 |
+
accumulate_grad_batches: 1
|
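For reference, the data: block above is what ldm's instantiate_from_config consumes to build the WebDataModuleFromConfig defined in ldm/data/laion.py later in this diff. A minimal sketch of that wiring, assuming the repository root is on PYTHONPATH; the config path below is hypothetical:

from omegaconf import OmegaConf
from ldm.util import instantiate_from_config

# Hypothetical config path; any of the txt2img multinode configs in this diff has the same layout.
config = OmegaConf.load("configs/stable-diffusion/txt2img-multinode-clip-encoder.yaml")
data = instantiate_from_config(config.data)   # -> ldm.data.laion.WebDataModuleFromConfig
train_loader = data.train_dataloader()        # webdataset WebLoader over the configured shards
batch = next(iter(train_loader))              # dict with "jpg" images in [-1, 1] and "txt" captions
print(batch["jpg"].shape, batch["txt"][:2])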
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/ldm/data/__init__.py
ADDED
File without changes
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/ldm/data/base.py
ADDED
@@ -0,0 +1,125 @@
import math
from abc import abstractmethod

import torch
from torch.utils.data import Dataset, ConcatDataset, ChainDataset, IterableDataset
import os
import numpy as np
#import cv2
from PIL import Image
import torch.distributed as dist

def get_rank():
    if not dist.is_available():
        return 0
    if not dist.is_initialized():
        return 0
    return dist.get_rank()

class Txt2ImgIterableBaseDataset(IterableDataset):
    '''
    Define an interface to make the IterableDatasets for text2img data chainable
    '''
    def __init__(self, file_path: str, rank, world_size, **kwargs):
        super().__init__()
        self.file_path = file_path
        self.folder_list = []
        self.file_list = []
        self.txt_list = []
        self.info = self._get_file_info(file_path)
        self.start = self.info['start']
        self.end = self.info['end']
        #self.rank = int(rank)
        self.rank = get_rank()
        self.world_size = world_size
        self.per_worker = int(math.floor((self.end - self.start) / float(self.world_size)))
        self.iter_start = self.start + self.rank * self.per_worker
        self.iter_end = min(self.iter_start + self.per_worker, self.end)
        self.num_records = self.iter_end - self.iter_start
        self.valid_ids = [i for i in range(self.iter_end)]
        #self.num_records = self.end - self.start
        #self.valid_ids = [i for i in range(self.end)]
        self.transforms = self.get_transforms(kwargs)
        print(f'{self.__class__.__name__} dataset contains {self.__len__()} examples (rank: {self.rank}/{world_size})\nstart {self.iter_start} end {self.iter_end}')

    def get_transforms(self, dataset_config):
        import torchvision
        from ldm.util import instantiate_from_config
        from einops import rearrange
        if 'image_transforms' in dataset_config:
            image_transforms = [instantiate_from_config(tt) for tt in dataset_config['image_transforms']]
        else:
            image_transforms = []

        image_transforms.extend([torchvision.transforms.ToTensor(),
                                 torchvision.transforms.Lambda(lambda x: rearrange(x * 2. - 1., 'c h w -> h w c'))])
        return torchvision.transforms.Compose(image_transforms)

        # if 'transforms' in dataset_config:
        #     transforms_config = OmegaConf.to_container(dataset_config.transforms)
        # else:
        #     transforms_config = dict()

        # transform_dict = {dkey: load_partial_from_config(transforms_config[dkey])
        #                   if transforms_config[dkey] != 'identity' else identity
        #                   for dkey in transforms_config}
        # img_key = dataset_config.get('image_key', 'jpeg')
        # transform_dict.update({img_key: image_transforms})

    def __len__(self):
        return self.iter_end - self.iter_start
        #return self.end - self.start

    def __iter__(self):
        #sample_iterator = self._sample_generator(self.start, self.end)
        sample_iterator = self._sample_generator(self.iter_start, self.iter_end)
        return sample_iterator

    def _sample_generator(self, start, end):
        for idx in range(start, end):
            file_name = self.file_list[idx]
            txt_name = self.txt_list[idx]
            f_ = open(txt_name, 'r')
            txt_ = f_.read()
            f_.close()
            #image = cv2.imdecode(np.fromfile(file_name, dtype=np.uint8), 1)
            #image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            #image = torch.from_numpy(image) / 255
            image = Image.open(file_name).convert('RGB')
            image = self.transforms(image)
            yield {"caption": txt_, "image": image}


    def _get_file_info(self, file_path):
        info = \
        {
            "start": 1,
            "end": 0,
        }
        self.folder_list = [file_path + i for i in os.listdir(file_path) if '.' not in i]
        for folder in self.folder_list:
            files = [folder + '/' + i for i in os.listdir(folder) if 'jpg' in i]
            txts = [k.replace('jpg', 'txt') for k in files]
            self.file_list.extend(files)
            self.txt_list.extend(txts)
        info['end'] = len(self.file_list)
        # with open(file_path, 'r') as fin:
        #     for _ in enumerate(fin):
        #         info['end'] += 1
        # self.txt_list = [k.replace('jpg', 'txt') for k in self.file_list]
        return info

class PRNGMixin(object):
    """
    Adds a prng property which is a numpy RandomState which gets
    reinitialized whenever the pid changes to avoid synchronized sampling
    behavior when used in conjunction with multiprocessing.
    """
    @property
    def prng(self):
        currentpid = os.getpid()
        if getattr(self, "_initpid", None) != currentpid:
            self._initpid = currentpid
            self._prng = np.random.RandomState()
        return self._prng
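Txt2ImgIterableBaseDataset already splits its file range by rank, so it can be fed straight into a torch DataLoader. A minimal sketch, assuming a hypothetical /data/txt2img/ root that holds subfolders of paired <name>.jpg / <name>.txt files:

from torch.utils.data import DataLoader
from ldm.data.base import Txt2ImgIterableBaseDataset

# "/data/txt2img/" is an assumed layout: <root>/<folder>/<name>.jpg with a matching <name>.txt caption.
dataset = Txt2ImgIterableBaseDataset(
    file_path="/data/txt2img/", rank=0, world_size=1,
    image_transforms=[
        {"target": "torchvision.transforms.Resize", "params": {"size": 256}},
        {"target": "torchvision.transforms.CenterCrop", "params": {"size": 256}},
    ])
loader = DataLoader(dataset, batch_size=4, num_workers=0)
sample = next(iter(loader))
print(sample["image"].shape, sample["caption"][0])   # images come out as 256x256x3 tensors in [-1, 1]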
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/ldm/data/coco.py
ADDED
@@ -0,0 +1,253 @@
import os
import json
import albumentations
import numpy as np
from PIL import Image
from tqdm import tqdm
from torch.utils.data import Dataset
from abc import abstractmethod


class CocoBase(Dataset):
    """needed for (image, caption, segmentation) pairs"""
    def __init__(self, size=None, dataroot="", datajson="", onehot_segmentation=False, use_stuffthing=False,
                 crop_size=None, force_no_crop=False, given_files=None, use_segmentation=True, crop_type=None):
        self.split = self.get_split()
        self.size = size
        if crop_size is None:
            self.crop_size = size
        else:
            self.crop_size = crop_size

        assert crop_type in [None, 'random', 'center']
        self.crop_type = crop_type
        self.use_segmenation = use_segmentation
        self.onehot = onehot_segmentation  # return segmentation as rgb or one hot
        self.stuffthing = use_stuffthing  # include thing in segmentation
        if self.onehot and not self.stuffthing:
            raise NotImplemented("One hot mode is only supported for the "
                                 "stuffthings version because labels are stored "
                                 "a bit different.")

        data_json = datajson
        with open(data_json) as json_file:
            self.json_data = json.load(json_file)
            self.img_id_to_captions = dict()
            self.img_id_to_filepath = dict()
            self.img_id_to_segmentation_filepath = dict()

        assert data_json.split("/")[-1] in [f"captions_train{self.year()}.json",
                                            f"captions_val{self.year()}.json"]
        # TODO currently hardcoded paths, would be better to follow logic in
        # cocstuff pixelmaps
        if self.use_segmenation:
            if self.stuffthing:
                self.segmentation_prefix = (
                    f"data/cocostuffthings/val{self.year()}" if
                    data_json.endswith(f"captions_val{self.year()}.json") else
                    f"data/cocostuffthings/train{self.year()}")
            else:
                self.segmentation_prefix = (
                    f"data/coco/annotations/stuff_val{self.year()}_pixelmaps" if
                    data_json.endswith(f"captions_val{self.year()}.json") else
                    f"data/coco/annotations/stuff_train{self.year()}_pixelmaps")

        imagedirs = self.json_data["images"]
        self.labels = {"image_ids": list()}
        for imgdir in tqdm(imagedirs, desc="ImgToPath"):
            self.img_id_to_filepath[imgdir["id"]] = os.path.join(dataroot, imgdir["file_name"])
            self.img_id_to_captions[imgdir["id"]] = list()
            pngfilename = imgdir["file_name"].replace("jpg", "png")
            if self.use_segmenation:
                self.img_id_to_segmentation_filepath[imgdir["id"]] = os.path.join(
                    self.segmentation_prefix, pngfilename)
            if given_files is not None:
                if pngfilename in given_files:
                    self.labels["image_ids"].append(imgdir["id"])
            else:
                self.labels["image_ids"].append(imgdir["id"])

        capdirs = self.json_data["annotations"]
        for capdir in tqdm(capdirs, desc="ImgToCaptions"):
            # there are in average 5 captions per image
            #self.img_id_to_captions[capdir["image_id"]].append(np.array([capdir["caption"]]))
            self.img_id_to_captions[capdir["image_id"]].append(capdir["caption"])

        self.rescaler = albumentations.SmallestMaxSize(max_size=self.size)
        if self.split=="validation":
            self.cropper = albumentations.CenterCrop(height=self.crop_size, width=self.crop_size)
        else:
            # default option for train is random crop
            if self.crop_type in [None, 'random']:
                self.cropper = albumentations.RandomCrop(height=self.crop_size, width=self.crop_size)
            else:
                self.cropper = albumentations.CenterCrop(height=self.crop_size, width=self.crop_size)
        self.preprocessor = albumentations.Compose(
            [self.rescaler, self.cropper],
            additional_targets={"segmentation": "image"})
        if force_no_crop:
            self.rescaler = albumentations.Resize(height=self.size, width=self.size)
            self.preprocessor = albumentations.Compose(
                [self.rescaler],
                additional_targets={"segmentation": "image"})

    @abstractmethod
    def year(self):
        raise NotImplementedError()

    def __len__(self):
        return len(self.labels["image_ids"])

    def preprocess_image(self, image_path, segmentation_path=None):
        image = Image.open(image_path)
        if not image.mode == "RGB":
            image = image.convert("RGB")
        image = np.array(image).astype(np.uint8)
        if segmentation_path:
            segmentation = Image.open(segmentation_path)
            if not self.onehot and not segmentation.mode == "RGB":
                segmentation = segmentation.convert("RGB")
            segmentation = np.array(segmentation).astype(np.uint8)
            if self.onehot:
                assert self.stuffthing
                # stored in caffe format: unlabeled==255. stuff and thing from
                # 0-181. to be compatible with the labels in
                # https://github.com/nightrome/cocostuff/blob/master/labels.txt
                # we shift stuffthing one to the right and put unlabeled in zero
                # as long as segmentation is uint8 shifting to right handles the
                # latter too
                assert segmentation.dtype == np.uint8
                segmentation = segmentation + 1

            processed = self.preprocessor(image=image, segmentation=segmentation)

            image, segmentation = processed["image"], processed["segmentation"]
        else:
            image = self.preprocessor(image=image,)['image']

        image = (image / 127.5 - 1.0).astype(np.float32)
        if segmentation_path:
            if self.onehot:
                assert segmentation.dtype == np.uint8
                # make it one hot
                n_labels = 183
                flatseg = np.ravel(segmentation)
                onehot = np.zeros((flatseg.size, n_labels), dtype=np.bool)
                onehot[np.arange(flatseg.size), flatseg] = True
                onehot = onehot.reshape(segmentation.shape + (n_labels,)).astype(int)
                segmentation = onehot
            else:
                segmentation = (segmentation / 127.5 - 1.0).astype(np.float32)
            return image, segmentation
        else:
            return image

    def __getitem__(self, i):
        img_path = self.img_id_to_filepath[self.labels["image_ids"][i]]
        if self.use_segmenation:
            seg_path = self.img_id_to_segmentation_filepath[self.labels["image_ids"][i]]
            image, segmentation = self.preprocess_image(img_path, seg_path)
        else:
            image = self.preprocess_image(img_path)
        captions = self.img_id_to_captions[self.labels["image_ids"][i]]
        # randomly draw one of all available captions per image
        caption = captions[np.random.randint(0, len(captions))]
        example = {"image": image,
                   #"caption": [str(caption[0])],
                   "caption": caption,
                   "img_path": img_path,
                   "filename_": img_path.split(os.sep)[-1]
                   }
        if self.use_segmenation:
            example.update({"seg_path": seg_path, 'segmentation': segmentation})
        return example


class CocoImagesAndCaptionsTrain2017(CocoBase):
    """returns a pair of (image, caption)"""
    def __init__(self, size, onehot_segmentation=False, use_stuffthing=False, crop_size=None, force_no_crop=False,):
        super().__init__(size=size,
                         dataroot="data/coco/train2017",
                         datajson="data/coco/annotations/captions_train2017.json",
                         onehot_segmentation=onehot_segmentation,
                         use_stuffthing=use_stuffthing, crop_size=crop_size, force_no_crop=force_no_crop)

    def get_split(self):
        return "train"

    def year(self):
        return '2017'


class CocoImagesAndCaptionsValidation2017(CocoBase):
    """returns a pair of (image, caption)"""
    def __init__(self, size, onehot_segmentation=False, use_stuffthing=False, crop_size=None, force_no_crop=False,
                 given_files=None):
        super().__init__(size=size,
                         dataroot="data/coco/val2017",
                         datajson="data/coco/annotations/captions_val2017.json",
                         onehot_segmentation=onehot_segmentation,
                         use_stuffthing=use_stuffthing, crop_size=crop_size, force_no_crop=force_no_crop,
                         given_files=given_files)

    def get_split(self):
        return "validation"

    def year(self):
        return '2017'



class CocoImagesAndCaptionsTrain2014(CocoBase):
    """returns a pair of (image, caption)"""
    def __init__(self, size, onehot_segmentation=False, use_stuffthing=False, crop_size=None, force_no_crop=False, crop_type='random'):
        super().__init__(size=size,
                         dataroot="data/coco/train2014",
                         datajson="data/coco/annotations2014/annotations/captions_train2014.json",
                         onehot_segmentation=onehot_segmentation,
                         use_stuffthing=use_stuffthing, crop_size=crop_size, force_no_crop=force_no_crop,
                         use_segmentation=False,
                         crop_type=crop_type)

    def get_split(self):
        return "train"

    def year(self):
        return '2014'

class CocoImagesAndCaptionsValidation2014(CocoBase):
    """returns a pair of (image, caption)"""
    def __init__(self, size, onehot_segmentation=False, use_stuffthing=False, crop_size=None, force_no_crop=False,
                 given_files=None, crop_type='center', **kwargs):
        super().__init__(size=size,
                         dataroot="data/coco/val2014",
                         datajson="data/coco/annotations2014/annotations/captions_val2014.json",
                         onehot_segmentation=onehot_segmentation,
                         use_stuffthing=use_stuffthing, crop_size=crop_size, force_no_crop=force_no_crop,
                         given_files=given_files,
                         use_segmentation=False,
                         crop_type=crop_type)

    def get_split(self):
        return "validation"

    def year(self):
        return '2014'

if __name__ == '__main__':
    with open("data/coco/annotations2014/annotations/captions_val2014.json", "r") as json_file:
        json_data = json.load(json_file)
        capdirs = json_data["annotations"]
        import pudb; pudb.set_trace()
    #d2 = CocoImagesAndCaptionsTrain2014(size=256)
    d2 = CocoImagesAndCaptionsValidation2014(size=256)
    print("constructed dataset.")
    print(f"length of {d2.__class__.__name__}: {len(d2)}")

    ex2 = d2[0]
    # ex3 = d3[0]
    # print(ex1["image"].shape)
    print(ex2["image"].shape)
    # print(ex3["image"].shape)
    # print(ex1["segmentation"].shape)
    print(ex2["caption"].__class__.__name__)
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/ldm/data/dummy.py
ADDED
@@ -0,0 +1,34 @@
import numpy as np
import random
import string
from torch.utils.data import Dataset, Subset

class DummyData(Dataset):
    def __init__(self, length, size):
        self.length = length
        self.size = size

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        x = np.random.randn(*self.size)
        letters = string.ascii_lowercase
        y = ''.join(random.choice(string.ascii_lowercase) for i in range(10))
        return {"jpg": x, "txt": y}


class DummyDataWithEmbeddings(Dataset):
    def __init__(self, length, size, emb_size):
        self.length = length
        self.size = size
        self.emb_size = emb_size

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        x = np.random.randn(*self.size)
        y = np.random.randn(*self.emb_size).astype(np.float32)
        return {"jpg": x, "txt": y}
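DummyData mirrors the jpg/txt keys produced by the webdataset pipeline, which makes it handy for smoke-testing the training loop without any shards. A minimal sketch; the 256x256x3 shape is an assumption chosen to match the configs above:

from torch.utils.data import DataLoader
from ldm.data.dummy import DummyData

ds = DummyData(length=16, size=(256, 256, 3))   # random "images" plus random 10-character "captions"
loader = DataLoader(ds, batch_size=4)
batch = next(iter(loader))
print(batch["jpg"].shape)                       # torch.Size([4, 256, 256, 3]); batch["txt"] is a list of strings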
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/ldm/data/imagenet.py
ADDED
@@ -0,0 +1,394 @@
import os, yaml, pickle, shutil, tarfile, glob
import cv2
import albumentations
import PIL
import numpy as np
import torchvision.transforms.functional as TF
from omegaconf import OmegaConf
from functools import partial
from PIL import Image
from tqdm import tqdm
from torch.utils.data import Dataset, Subset

import taming.data.utils as tdu
from taming.data.imagenet import str_to_indices, give_synsets_from_indices, download, retrieve
from taming.data.imagenet import ImagePaths

from ldm.modules.image_degradation import degradation_fn_bsr, degradation_fn_bsr_light


def synset2idx(path_to_yaml="data/index_synset.yaml"):
    with open(path_to_yaml) as f:
        di2s = yaml.load(f)
    return dict((v,k) for k,v in di2s.items())


class ImageNetBase(Dataset):
    def __init__(self, config=None):
        self.config = config or OmegaConf.create()
        if not type(self.config)==dict:
            self.config = OmegaConf.to_container(self.config)
        self.keep_orig_class_label = self.config.get("keep_orig_class_label", False)
        self.process_images = True  # if False we skip loading & processing images and self.data contains filepaths
        self._prepare()
        self._prepare_synset_to_human()
        self._prepare_idx_to_synset()
        self._prepare_human_to_integer_label()
        self._load()

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        return self.data[i]

    def _prepare(self):
        raise NotImplementedError()

    def _filter_relpaths(self, relpaths):
        ignore = set([
            "n06596364_9591.JPEG",
        ])
        relpaths = [rpath for rpath in relpaths if not rpath.split("/")[-1] in ignore]
        if "sub_indices" in self.config:
            indices = str_to_indices(self.config["sub_indices"])
            synsets = give_synsets_from_indices(indices, path_to_yaml=self.idx2syn)  # returns a list of strings
            self.synset2idx = synset2idx(path_to_yaml=self.idx2syn)
            files = []
            for rpath in relpaths:
                syn = rpath.split("/")[0]
                if syn in synsets:
                    files.append(rpath)
            return files
        else:
            return relpaths

    def _prepare_synset_to_human(self):
        SIZE = 2655750
        URL = "https://heibox.uni-heidelberg.de/f/9f28e956cd304264bb82/?dl=1"
        self.human_dict = os.path.join(self.root, "synset_human.txt")
        if (not os.path.exists(self.human_dict) or
                not os.path.getsize(self.human_dict)==SIZE):
            download(URL, self.human_dict)

    def _prepare_idx_to_synset(self):
        URL = "https://heibox.uni-heidelberg.de/f/d835d5b6ceda4d3aa910/?dl=1"
        self.idx2syn = os.path.join(self.root, "index_synset.yaml")
        if (not os.path.exists(self.idx2syn)):
            download(URL, self.idx2syn)

    def _prepare_human_to_integer_label(self):
        URL = "https://heibox.uni-heidelberg.de/f/2362b797d5be43b883f6/?dl=1"
        self.human2integer = os.path.join(self.root, "imagenet1000_clsidx_to_labels.txt")
        if (not os.path.exists(self.human2integer)):
            download(URL, self.human2integer)
        with open(self.human2integer, "r") as f:
            lines = f.read().splitlines()
            assert len(lines) == 1000
            self.human2integer_dict = dict()
            for line in lines:
                value, key = line.split(":")
                self.human2integer_dict[key] = int(value)

    def _load(self):
        with open(self.txt_filelist, "r") as f:
            self.relpaths = f.read().splitlines()
            l1 = len(self.relpaths)
            self.relpaths = self._filter_relpaths(self.relpaths)
            print("Removed {} files from filelist during filtering.".format(l1 - len(self.relpaths)))

        self.synsets = [p.split("/")[0] for p in self.relpaths]
        self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths]

        unique_synsets = np.unique(self.synsets)
        class_dict = dict((synset, i) for i, synset in enumerate(unique_synsets))
        if not self.keep_orig_class_label:
            self.class_labels = [class_dict[s] for s in self.synsets]
        else:
            self.class_labels = [self.synset2idx[s] for s in self.synsets]

        with open(self.human_dict, "r") as f:
            human_dict = f.read().splitlines()
            human_dict = dict(line.split(maxsplit=1) for line in human_dict)

        self.human_labels = [human_dict[s] for s in self.synsets]

        labels = {
            "relpath": np.array(self.relpaths),
            "synsets": np.array(self.synsets),
            "class_label": np.array(self.class_labels),
            "human_label": np.array(self.human_labels),
        }

        if self.process_images:
            self.size = retrieve(self.config, "size", default=256)
            self.data = ImagePaths(self.abspaths,
                                   labels=labels,
                                   size=self.size,
                                   random_crop=self.random_crop,
                                   )
        else:
            self.data = self.abspaths


class ImageNetTrain(ImageNetBase):
    NAME = "ILSVRC2012_train"
    URL = "http://www.image-net.org/challenges/LSVRC/2012/"
    AT_HASH = "a306397ccf9c2ead27155983c254227c0fd938e2"
    FILES = [
        "ILSVRC2012_img_train.tar",
    ]
    SIZES = [
        147897477120,
    ]

    def __init__(self, process_images=True, data_root=None, **kwargs):
        self.process_images = process_images
        self.data_root = data_root
        super().__init__(**kwargs)

    def _prepare(self):
        if self.data_root:
            self.root = os.path.join(self.data_root, self.NAME)
        else:
            cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
            self.root = os.path.join(cachedir, "autoencoders/data", self.NAME)

        self.datadir = os.path.join(self.root, "data")
        self.txt_filelist = os.path.join(self.root, "filelist.txt")
        self.expected_length = 1281167
        self.random_crop = retrieve(self.config, "ImageNetTrain/random_crop",
                                    default=True)
        if not tdu.is_prepared(self.root):
            # prep
            print("Preparing dataset {} in {}".format(self.NAME, self.root))

            datadir = self.datadir
            if not os.path.exists(datadir):
                path = os.path.join(self.root, self.FILES[0])
                if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]:
                    import academictorrents as at
                    atpath = at.get(self.AT_HASH, datastore=self.root)
                    assert atpath == path

                print("Extracting {} to {}".format(path, datadir))
                os.makedirs(datadir, exist_ok=True)
                with tarfile.open(path, "r:") as tar:
                    tar.extractall(path=datadir)

                print("Extracting sub-tars.")
                subpaths = sorted(glob.glob(os.path.join(datadir, "*.tar")))
                for subpath in tqdm(subpaths):
                    subdir = subpath[:-len(".tar")]
                    os.makedirs(subdir, exist_ok=True)
                    with tarfile.open(subpath, "r:") as tar:
                        tar.extractall(path=subdir)

            filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG"))
            filelist = [os.path.relpath(p, start=datadir) for p in filelist]
            filelist = sorted(filelist)
            filelist = "\n".join(filelist)+"\n"
            with open(self.txt_filelist, "w") as f:
                f.write(filelist)

            tdu.mark_prepared(self.root)


class ImageNetValidation(ImageNetBase):
    NAME = "ILSVRC2012_validation"
    URL = "http://www.image-net.org/challenges/LSVRC/2012/"
    AT_HASH = "5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5"
    VS_URL = "https://heibox.uni-heidelberg.de/f/3e0f6e9c624e45f2bd73/?dl=1"
    FILES = [
        "ILSVRC2012_img_val.tar",
        "validation_synset.txt",
    ]
    SIZES = [
        6744924160,
        1950000,
    ]

    def __init__(self, process_images=True, data_root=None, **kwargs):
        self.data_root = data_root
        self.process_images = process_images
        super().__init__(**kwargs)

    def _prepare(self):
        if self.data_root:
            self.root = os.path.join(self.data_root, self.NAME)
        else:
            cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
            self.root = os.path.join(cachedir, "autoencoders/data", self.NAME)
        self.datadir = os.path.join(self.root, "data")
        self.txt_filelist = os.path.join(self.root, "filelist.txt")
        self.expected_length = 50000
        self.random_crop = retrieve(self.config, "ImageNetValidation/random_crop",
                                    default=False)
        if not tdu.is_prepared(self.root):
            # prep
            print("Preparing dataset {} in {}".format(self.NAME, self.root))

            datadir = self.datadir
            if not os.path.exists(datadir):
                path = os.path.join(self.root, self.FILES[0])
                if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]:
                    import academictorrents as at
                    atpath = at.get(self.AT_HASH, datastore=self.root)
                    assert atpath == path

                print("Extracting {} to {}".format(path, datadir))
                os.makedirs(datadir, exist_ok=True)
                with tarfile.open(path, "r:") as tar:
                    tar.extractall(path=datadir)

                vspath = os.path.join(self.root, self.FILES[1])
                if not os.path.exists(vspath) or not os.path.getsize(vspath)==self.SIZES[1]:
                    download(self.VS_URL, vspath)

                with open(vspath, "r") as f:
                    synset_dict = f.read().splitlines()
                    synset_dict = dict(line.split() for line in synset_dict)

                print("Reorganizing into synset folders")
                synsets = np.unique(list(synset_dict.values()))
                for s in synsets:
                    os.makedirs(os.path.join(datadir, s), exist_ok=True)
                for k, v in synset_dict.items():
                    src = os.path.join(datadir, k)
                    dst = os.path.join(datadir, v)
                    shutil.move(src, dst)

            filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG"))
            filelist = [os.path.relpath(p, start=datadir) for p in filelist]
            filelist = sorted(filelist)
            filelist = "\n".join(filelist)+"\n"
            with open(self.txt_filelist, "w") as f:
                f.write(filelist)

            tdu.mark_prepared(self.root)



class ImageNetSR(Dataset):
    def __init__(self, size=None,
                 degradation=None, downscale_f=4, min_crop_f=0.5, max_crop_f=1.,
                 random_crop=True):
        """
        Imagenet Superresolution Dataloader
        Performs following ops in order:
        1.  crops a crop of size s from image either as random or center crop
        2.  resizes crop to size with cv2.area_interpolation
        3.  degrades resized crop with degradation_fn

        :param size: resizing to size after cropping
        :param degradation: degradation_fn, e.g. cv_bicubic or bsrgan_light
        :param downscale_f: Low Resolution Downsample factor
        :param min_crop_f: determines crop size s,
          where s = c * min_img_side_len with c sampled from interval (min_crop_f, max_crop_f)
        :param max_crop_f: ""
        :param data_root:
        :param random_crop:
        """
        self.base = self.get_base()
        assert size
        assert (size / downscale_f).is_integer()
        self.size = size
        self.LR_size = int(size / downscale_f)
        self.min_crop_f = min_crop_f
        self.max_crop_f = max_crop_f
        assert(max_crop_f <= 1.)
        self.center_crop = not random_crop

        self.image_rescaler = albumentations.SmallestMaxSize(max_size=size, interpolation=cv2.INTER_AREA)

        self.pil_interpolation = False  # gets reset later if incase interp_op is from pillow

        if degradation == "bsrgan":
            self.degradation_process = partial(degradation_fn_bsr, sf=downscale_f)

        elif degradation == "bsrgan_light":
            self.degradation_process = partial(degradation_fn_bsr_light, sf=downscale_f)

        else:
            interpolation_fn = {
                "cv_nearest": cv2.INTER_NEAREST,
                "cv_bilinear": cv2.INTER_LINEAR,
                "cv_bicubic": cv2.INTER_CUBIC,
                "cv_area": cv2.INTER_AREA,
                "cv_lanczos": cv2.INTER_LANCZOS4,
                "pil_nearest": PIL.Image.NEAREST,
                "pil_bilinear": PIL.Image.BILINEAR,
                "pil_bicubic": PIL.Image.BICUBIC,
                "pil_box": PIL.Image.BOX,
                "pil_hamming": PIL.Image.HAMMING,
                "pil_lanczos": PIL.Image.LANCZOS,
            }[degradation]

            self.pil_interpolation = degradation.startswith("pil_")

            if self.pil_interpolation:
                self.degradation_process = partial(TF.resize, size=self.LR_size, interpolation=interpolation_fn)

            else:
                self.degradation_process = albumentations.SmallestMaxSize(max_size=self.LR_size,
                                                                          interpolation=interpolation_fn)

    def __len__(self):
        return len(self.base)

    def __getitem__(self, i):
        example = self.base[i]
        image = Image.open(example["file_path_"])

        if not image.mode == "RGB":
            image = image.convert("RGB")

        image = np.array(image).astype(np.uint8)

        min_side_len = min(image.shape[:2])
        crop_side_len = min_side_len * np.random.uniform(self.min_crop_f, self.max_crop_f, size=None)
        crop_side_len = int(crop_side_len)

        if self.center_crop:
            self.cropper = albumentations.CenterCrop(height=crop_side_len, width=crop_side_len)

        else:
            self.cropper = albumentations.RandomCrop(height=crop_side_len, width=crop_side_len)

        image = self.cropper(image=image)["image"]
        image = self.image_rescaler(image=image)["image"]

        if self.pil_interpolation:
            image_pil = PIL.Image.fromarray(image)
            LR_image = self.degradation_process(image_pil)
            LR_image = np.array(LR_image).astype(np.uint8)

        else:
            LR_image = self.degradation_process(image=image)["image"]

        example["image"] = (image/127.5 - 1.0).astype(np.float32)
        example["LR_image"] = (LR_image/127.5 - 1.0).astype(np.float32)
        example["caption"] = example["human_label"]  # dummy caption
        return example


class ImageNetSRTrain(ImageNetSR):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def get_base(self):
        with open("data/imagenet_train_hr_indices.p", "rb") as f:
            indices = pickle.load(f)
        dset = ImageNetTrain(process_images=False,)
        return Subset(dset, indices)


class ImageNetSRValidation(ImageNetSR):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def get_base(self):
        with open("data/imagenet_val_hr_indices.p", "rb") as f:
            indices = pickle.load(f)
        dset = ImageNetValidation(process_images=False,)
        return Subset(dset, indices)
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/ldm/data/inpainting/__init__.py
ADDED
File without changes
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/ldm/data/inpainting/synthetic_mask.py
ADDED
@@ -0,0 +1,166 @@
from PIL import Image, ImageDraw
import numpy as np

settings = {
    "256narrow": {
        "p_irr": 1,
        "min_n_irr": 4,
        "max_n_irr": 50,
        "max_l_irr": 40,
        "max_w_irr": 10,
        "min_n_box": None,
        "max_n_box": None,
        "min_s_box": None,
        "max_s_box": None,
        "marg": None,
    },
    "256train": {
        "p_irr": 0.5,
        "min_n_irr": 1,
        "max_n_irr": 5,
        "max_l_irr": 200,
        "max_w_irr": 100,
        "min_n_box": 1,
        "max_n_box": 4,
        "min_s_box": 30,
        "max_s_box": 150,
        "marg": 10,
    },
    "512train": {  # TODO: experimental
        "p_irr": 0.5,
        "min_n_irr": 1,
        "max_n_irr": 5,
        "max_l_irr": 450,
        "max_w_irr": 250,
        "min_n_box": 1,
        "max_n_box": 4,
        "min_s_box": 30,
        "max_s_box": 300,
        "marg": 10,
    },
    "512train-large": {  # TODO: experimental
        "p_irr": 0.5,
        "min_n_irr": 1,
        "max_n_irr": 5,
        "max_l_irr": 450,
        "max_w_irr": 400,
        "min_n_box": 1,
        "max_n_box": 4,
        "min_s_box": 75,
        "max_s_box": 450,
        "marg": 10,
    },
}


def gen_segment_mask(mask, start, end, brush_width):
    mask = mask > 0
    mask = (255 * mask).astype(np.uint8)
    mask = Image.fromarray(mask)
    draw = ImageDraw.Draw(mask)
    draw.line([start, end], fill=255, width=brush_width, joint="curve")
    mask = np.array(mask) / 255
    return mask


def gen_box_mask(mask, masked):
    x_0, y_0, w, h = masked
    mask[y_0:y_0 + h, x_0:x_0 + w] = 1
    return mask


def gen_round_mask(mask, masked, radius):
    x_0, y_0, w, h = masked
    xy = [(x_0, y_0), (x_0 + w, y_0 + w)]

    mask = mask > 0
    mask = (255 * mask).astype(np.uint8)
    mask = Image.fromarray(mask)
    draw = ImageDraw.Draw(mask)
    draw.rounded_rectangle(xy, radius=radius, fill=255)
    mask = np.array(mask) / 255
    return mask


def gen_large_mask(prng, img_h, img_w,
                   marg, p_irr, min_n_irr, max_n_irr, max_l_irr, max_w_irr,
                   min_n_box, max_n_box, min_s_box, max_s_box):
    """
    img_h: int, an image height
    img_w: int, an image width
    marg: int, a margin for a box starting coordinate
    p_irr: float, 0 <= p_irr <= 1, a probability of a polygonal chain mask

    min_n_irr: int, min number of segments
    max_n_irr: int, max number of segments
    max_l_irr: max length of a segment in polygonal chain
    max_w_irr: max width of a segment in polygonal chain

    min_n_box: int, min bound for the number of box primitives
    max_n_box: int, max bound for the number of box primitives
    min_s_box: int, min length of a box side
    max_s_box: int, max length of a box side
    """

    mask = np.zeros((img_h, img_w))
    uniform = prng.randint

    if np.random.uniform(0, 1) < p_irr:  # generate polygonal chain
        n = uniform(min_n_irr, max_n_irr)  # sample number of segments

        for _ in range(n):
            y = uniform(0, img_h)  # sample a starting point
            x = uniform(0, img_w)

            a = uniform(0, 360)  # sample angle
            l = uniform(10, max_l_irr)  # sample segment length
            w = uniform(5, max_w_irr)  # sample a segment width

            # draw segment starting from (x,y) to (x_,y_) using brush of width w
            x_ = x + l * np.sin(a)
            y_ = y + l * np.cos(a)

            mask = gen_segment_mask(mask, start=(x, y), end=(x_, y_), brush_width=w)
            x, y = x_, y_
    else:  # generate Box masks
        n = uniform(min_n_box, max_n_box)  # sample number of rectangles

        for _ in range(n):
            h = uniform(min_s_box, max_s_box)  # sample box shape
            w = uniform(min_s_box, max_s_box)

            x_0 = uniform(marg, img_w - marg - w)  # sample upper-left coordinates of box
            y_0 = uniform(marg, img_h - marg - h)

            if np.random.uniform(0, 1) < 0.5:
                mask = gen_box_mask(mask, masked=(x_0, y_0, w, h))
            else:
                r = uniform(0, 60)  # sample radius
                mask = gen_round_mask(mask, masked=(x_0, y_0, w, h), radius=r)
    return mask


make_lama_mask = lambda prng, h, w: gen_large_mask(prng, h, w, **settings["256train"])
make_narrow_lama_mask = lambda prng, h, w: gen_large_mask(prng, h, w, **settings["256narrow"])
make_512_lama_mask = lambda prng, h, w: gen_large_mask(prng, h, w, **settings["512train"])
make_512_lama_mask_large = lambda prng, h, w: gen_large_mask(prng, h, w, **settings["512train-large"])


MASK_MODES = {
    "256train": make_lama_mask,
    "256narrow": make_narrow_lama_mask,
    "512train": make_512_lama_mask,
    "512train-large": make_512_lama_mask_large
}

if __name__ == "__main__":
    import sys

    out = sys.argv[1]

    prng = np.random.RandomState(1)
    kwargs = settings["256train"]
    mask = gen_large_mask(prng, 256, 256, **kwargs)
    mask = (255 * mask).astype(np.uint8)
    mask = Image.fromarray(mask)
    mask.save(out)
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/ldm/data/laion.py
ADDED
@@ -0,0 +1,516 @@
import webdataset as wds
import kornia
from PIL import Image
import io
import os
import torchvision
from PIL import Image
import glob
import random
import numpy as np
from lightning_utilities import module_available

if module_available("lightning"):
    import lightning.pytorch as pl
elif module_available("pytorch_lightning"):
    import pytorch_lightning as pl
from tqdm import tqdm
from omegaconf import OmegaConf
from einops import rearrange
import torch
from webdataset.handlers import warn_and_continue


from ldm.util import instantiate_from_config
from ldm.data.inpainting.synthetic_mask import gen_large_mask, MASK_MODES
from ldm.data.base import PRNGMixin


class DataWithWings(torch.utils.data.IterableDataset):
    def __init__(self, min_size, transform=None, target_transform=None):
        self.min_size = min_size
        self.transform = transform if transform is not None else nn.Identity()
        self.target_transform = target_transform if target_transform is not None else nn.Identity()
        self.kv = OnDiskKV(file='/home/ubuntu/laion5B-watermark-safety-ordered', key_format='q', value_format='ee')
        self.kv_aesthetic = OnDiskKV(file='/home/ubuntu/laion5B-aesthetic-tags-kv', key_format='q', value_format='e')
        self.pwatermark_threshold = 0.8
        self.punsafe_threshold = 0.5
        self.aesthetic_threshold = 5.
        self.total_samples = 0
        self.samples = 0
        location = 'pipe:aws s3 cp --quiet s3://s-datasets/laion5b/laion2B-data/{000000..231349}.tar -'

        self.inner_dataset = wds.DataPipeline(
            wds.ResampledShards(location),
            wds.tarfile_to_samples(handler=wds.warn_and_continue),
            wds.shuffle(1000, handler=wds.warn_and_continue),
            wds.decode('pilrgb', handler=wds.warn_and_continue),
            wds.map(self._add_tags, handler=wds.ignore_and_continue),
            wds.select(self._filter_predicate),
            wds.map_dict(jpg=self.transform, txt=self.target_transform, punsafe=self._punsafe_to_class, handler=wds.warn_and_continue),
            wds.to_tuple('jpg', 'txt', 'punsafe', handler=wds.warn_and_continue),
        )

    @staticmethod
    def _compute_hash(url, text):
        if url is None:
            url = ''
        if text is None:
            text = ''
        total = (url + text).encode('utf-8')
        return mmh3.hash64(total)[0]

    def _add_tags(self, x):
        hsh = self._compute_hash(x['json']['url'], x['txt'])
        pwatermark, punsafe = self.kv[hsh]
        aesthetic = self.kv_aesthetic[hsh][0]
        return {**x, 'pwatermark': pwatermark, 'punsafe': punsafe, 'aesthetic': aesthetic}

    def _punsafe_to_class(self, punsafe):
        return torch.tensor(punsafe >= self.punsafe_threshold).long()

    def _filter_predicate(self, x):
        try:
            return x['pwatermark'] < self.pwatermark_threshold and x['aesthetic'] >= self.aesthetic_threshold and x['json']['original_width'] >= self.min_size and x['json']['original_height'] >= self.min_size
        except:
            return False

    def __iter__(self):
        return iter(self.inner_dataset)


def dict_collation_fn(samples, combine_tensors=True, combine_scalars=True):
    """Take a list of samples (as dictionary) and create a batch, preserving the keys.
    If `tensors` is True, `ndarray` objects are combined into
    tensor batches.
    :param dict samples: list of samples
    :param bool tensors: whether to turn lists of ndarrays into a single ndarray
    :returns: single sample consisting of a batch
    :rtype: dict
    """
    keys = set.intersection(*[set(sample.keys()) for sample in samples])
    batched = {key: [] for key in keys}

    for s in samples:
        [batched[key].append(s[key]) for key in batched]

    result = {}
    for key in batched:
        if isinstance(batched[key][0], (int, float)):
            if combine_scalars:
                result[key] = np.array(list(batched[key]))
        elif isinstance(batched[key][0], torch.Tensor):
            if combine_tensors:
                result[key] = torch.stack(list(batched[key]))
        elif isinstance(batched[key][0], np.ndarray):
            if combine_tensors:
                result[key] = np.array(list(batched[key]))
        else:
            result[key] = list(batched[key])
    return result


class WebDataModuleFromConfig(pl.LightningDataModule):
    def __init__(self, tar_base, batch_size, train=None, validation=None,
                 test=None, num_workers=4, multinode=True, min_size=None,
                 max_pwatermark=1.0,
                 **kwargs):
        super().__init__()
        print(f'Setting tar base to {tar_base}')
        self.tar_base = tar_base
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.train = train
        self.validation = validation
        self.test = test
        self.multinode = multinode
        self.min_size = min_size  # filter out very small images
        self.max_pwatermark = max_pwatermark  # filter out watermarked images

    def make_loader(self, dataset_config, train=True):
        if 'image_transforms' in dataset_config:
            image_transforms = [instantiate_from_config(tt) for tt in dataset_config.image_transforms]
        else:
            image_transforms = []

        image_transforms.extend([torchvision.transforms.ToTensor(),
                                 torchvision.transforms.Lambda(lambda x: rearrange(x * 2. - 1., 'c h w -> h w c'))])
        image_transforms = torchvision.transforms.Compose(image_transforms)

        if 'transforms' in dataset_config:
            transforms_config = OmegaConf.to_container(dataset_config.transforms)
        else:
            transforms_config = dict()

        transform_dict = {dkey: load_partial_from_config(transforms_config[dkey])
                          if transforms_config[dkey] != 'identity' else identity
                          for dkey in transforms_config}
        img_key = dataset_config.get('image_key', 'jpeg')
        transform_dict.update({img_key: image_transforms})

        if 'postprocess' in dataset_config:
            postprocess = instantiate_from_config(dataset_config['postprocess'])
        else:
            postprocess = None

        shuffle = dataset_config.get('shuffle', 0)
        shardshuffle = shuffle > 0

        nodesplitter = wds.shardlists.split_by_node if self.multinode else wds.shardlists.single_node_only

        if self.tar_base == "__improvedaesthetic__":
            print("## Warning, loading the same improved aesthetic dataset "
                  "for all splits and ignoring shards parameter.")
            tars = "pipe:aws s3 cp s3://s-laion/improved-aesthetics-laion-2B-en-subsets/aesthetics_tars/{000000..060207}.tar -"
        else:
            tars = os.path.join(self.tar_base, dataset_config.shards)

        dset = wds.WebDataset(
            tars,
            nodesplitter=nodesplitter,
            shardshuffle=shardshuffle,
            handler=wds.warn_and_continue).repeat().shuffle(shuffle)
        print(f'Loading webdataset with {len(dset.pipeline[0].urls)} shards.')

        dset = (dset
                .select(self.filter_keys)
                .decode('pil', handler=wds.warn_and_continue)
                .select(self.filter_size)
                .map_dict(**transform_dict, handler=wds.warn_and_continue)
                )
        if postprocess is not None:
            dset = dset.map(postprocess)
        dset = (dset
                .batched(self.batch_size, partial=False,
                         collation_fn=dict_collation_fn)
                )

        num_workers = self.num_workers
        if not train:
            num_workers = 1
        loader = wds.WebLoader(dset, batch_size=None, shuffle=False,
                               num_workers=num_workers)

        return loader

    def filter_size(self, x):
        try:
            valid = True
            if self.min_size is not None and self.min_size > 1:
                try:
                    valid = valid and x['json']['original_width'] >= self.min_size and x['json']['original_height'] >= self.min_size
                except Exception:
                    valid = False
            if self.max_pwatermark is not None and self.max_pwatermark < 1.0:
                try:
                    valid = valid and x['json']['pwatermark'] <= self.max_pwatermark
                except Exception:
                    valid = False
            return valid
        except Exception:
            return False

    def filter_keys(self, x):
        try:
            return ("jpg" in x) and ("txt" in x)
        except Exception:
            return False

    def train_dataloader(self):
        return self.make_loader(self.train)

    def val_dataloader(self):
        return self.make_loader(self.validation, train=False)

    def test_dataloader(self):
        return self.make_loader(self.test, train=False)


from ldm.modules.image_degradation import degradation_fn_bsr_light

class AddLR(object):
    def __init__(self, factor):
        self.factor = factor

    def pt2np(self, x):
        x = ((x+1.0)*127.5).clamp(0, 255).to(dtype=torch.uint8).detach().cpu().numpy()
        return x

    def np2pt(self, x):
        x = torch.from_numpy(x)/127.5-1.0
        return x

    def __call__(self, sample):
        # sample['jpg'] is tensor hwc in [-1, 1] at this point
        x = self.pt2np(sample['jpg'])
        x = degradation_fn_bsr_light(x, sf=self.factor)['image']
        x = self.np2pt(x)
        sample['lr'] = x
        return sample


class AddMask(PRNGMixin):
    def __init__(self, mode="512train", p_drop=0.):
        super().__init__()
        assert mode in list(MASK_MODES.keys()), f'unknown mask generation mode "{mode}"'
        self.make_mask = MASK_MODES[mode]
        self.p_drop = p_drop

    def __call__(self, sample):
        # sample['jpg'] is tensor hwc in [-1, 1] at this point
        x = sample['jpg']
        mask = self.make_mask(self.prng, x.shape[0], x.shape[1])
        if self.prng.choice(2, p=[1 - self.p_drop, self.p_drop]):
            mask = np.ones_like(mask)
        mask[mask < 0.5] = 0
        mask[mask > 0.5] = 1
        mask = torch.from_numpy(mask[..., None])
        sample['mask'] = mask
        sample['masked_image'] = x * (mask < 0.5)
        return sample


class AddEdge(PRNGMixin):
    def __init__(self, mode="512train", mask_edges=True):
        super().__init__()
        assert mode in list(MASK_MODES.keys()), f'unknown mask generation mode "{mode}"'
        self.make_mask = MASK_MODES[mode]
        self.n_down_choices = [0]
        self.sigma_choices = [1, 2]
        self.mask_edges = mask_edges

    @torch.no_grad()
    def __call__(self, sample):
        # sample['jpg'] is tensor hwc in [-1, 1] at this point
        x = sample['jpg']

        mask = self.make_mask(self.prng, x.shape[0], x.shape[1])
        mask[mask < 0.5] = 0
        mask[mask > 0.5] = 1
        mask = torch.from_numpy(mask[..., None])
        sample['mask'] = mask

        n_down_idx = self.prng.choice(len(self.n_down_choices))
        sigma_idx = self.prng.choice(len(self.sigma_choices))

        n_choices = len(self.n_down_choices)*len(self.sigma_choices)
        raveled_idx = np.ravel_multi_index((n_down_idx, sigma_idx),
                                           (len(self.n_down_choices), len(self.sigma_choices)))
        normalized_idx = raveled_idx/max(1, n_choices-1)

        n_down = self.n_down_choices[n_down_idx]
        sigma = self.sigma_choices[sigma_idx]

        kernel_size = 4*sigma+1
        kernel_size = (kernel_size, kernel_size)
        sigma = (sigma, sigma)
        canny = kornia.filters.Canny(
            low_threshold=0.1,
            high_threshold=0.2,
            kernel_size=kernel_size,
            sigma=sigma,
            hysteresis=True,
        )
        y = (x+1.0)/2.0  # in 01
        y = y.unsqueeze(0).permute(0, 3, 1, 2).contiguous()

        # down
        for i_down in range(n_down):
            size = min(y.shape[-2], y.shape[-1])//2
            y = kornia.geometry.transform.resize(y, size, antialias=True)

        # edge
        _, y = canny(y)

        if n_down > 0:
            size = x.shape[0], x.shape[1]
            y = kornia.geometry.transform.resize(y, size, interpolation="nearest")

        y = y.permute(0, 2, 3, 1)[0].expand(-1, -1, 3).contiguous()
        y = y*2.0-1.0

        if self.mask_edges:
            sample['masked_image'] = y * (mask < 0.5)
        else:
            sample['masked_image'] = y
            sample['mask'] = torch.zeros_like(sample['mask'])

        # concat normalized idx
        sample['smoothing_strength'] = torch.ones_like(sample['mask'])*normalized_idx

        return sample


def example00():
|
345 |
+
url = "pipe:aws s3 cp s3://s-datasets/laion5b/laion2B-data/000000.tar -"
|
346 |
+
dataset = wds.WebDataset(url)
|
347 |
+
example = next(iter(dataset))
|
348 |
+
for k in example:
|
349 |
+
print(k, type(example[k]))
|
350 |
+
|
351 |
+
print(example["__key__"])
|
352 |
+
for k in ["json", "txt"]:
|
353 |
+
print(example[k].decode())
|
354 |
+
|
355 |
+
image = Image.open(io.BytesIO(example["jpg"]))
|
356 |
+
outdir = "tmp"
|
357 |
+
os.makedirs(outdir, exist_ok=True)
|
358 |
+
image.save(os.path.join(outdir, example["__key__"] + ".png"))
|
359 |
+
|
360 |
+
|
361 |
+
def load_example(example):
|
362 |
+
return {
|
363 |
+
"key": example["__key__"],
|
364 |
+
"image": Image.open(io.BytesIO(example["jpg"])),
|
365 |
+
"text": example["txt"].decode(),
|
366 |
+
}
|
367 |
+
|
368 |
+
|
369 |
+
for i, example in tqdm(enumerate(dataset)):
|
370 |
+
ex = load_example(example)
|
371 |
+
print(ex["image"].size, ex["text"])
|
372 |
+
if i >= 100:
|
373 |
+
break
|
374 |
+
|
375 |
+
|
376 |
+
def example01():
|
377 |
+
# the first laion shards contain ~10k examples each
|
378 |
+
url = "pipe:aws s3 cp s3://s-datasets/laion5b/laion2B-data/{000000..000002}.tar -"
|
379 |
+
|
380 |
+
batch_size = 3
|
381 |
+
shuffle_buffer = 10000
|
382 |
+
dset = wds.WebDataset(
|
383 |
+
url,
|
384 |
+
nodesplitter=wds.shardlists.split_by_node,
|
385 |
+
shardshuffle=True,
|
386 |
+
)
|
387 |
+
dset = (dset
|
388 |
+
.shuffle(shuffle_buffer, initial=shuffle_buffer)
|
389 |
+
.decode('pil', handler=warn_and_continue)
|
390 |
+
.batched(batch_size, partial=False,
|
391 |
+
collation_fn=dict_collation_fn)
|
392 |
+
)
|
393 |
+
|
394 |
+
num_workers = 2
|
395 |
+
loader = wds.WebLoader(dset, batch_size=None, shuffle=False, num_workers=num_workers)
|
396 |
+
|
397 |
+
batch_sizes = list()
|
398 |
+
keys_per_epoch = list()
|
399 |
+
for epoch in range(5):
|
400 |
+
keys = list()
|
401 |
+
for batch in tqdm(loader):
|
402 |
+
batch_sizes.append(len(batch["__key__"]))
|
403 |
+
keys.append(batch["__key__"])
|
404 |
+
|
405 |
+
for bs in batch_sizes:
|
406 |
+
assert bs==batch_size
|
407 |
+
print(f"{len(batch_sizes)} batches of size {batch_size}.")
|
408 |
+
batch_sizes = list()
|
409 |
+
|
410 |
+
keys_per_epoch.append(keys)
|
411 |
+
for i_batch in [0, 1, -1]:
|
412 |
+
print(f"Batch {i_batch} of epoch {epoch}:")
|
413 |
+
print(keys[i_batch])
|
414 |
+
print("next epoch.")
|
415 |
+
|
416 |
+
|
417 |
+
def example02():
|
418 |
+
from omegaconf import OmegaConf
|
419 |
+
from torch.utils.data.distributed import DistributedSampler
|
420 |
+
from torch.utils.data import IterableDataset
|
421 |
+
from torch.utils.data import DataLoader, RandomSampler, Sampler, SequentialSampler
|
422 |
+
from pytorch_lightning.trainer.supporters import CombinedLoader, CycleIterator
|
423 |
+
|
424 |
+
#config = OmegaConf.load("configs/stable-diffusion/txt2img-1p4B-multinode-clip-encoder-high-res-512.yaml")
|
425 |
+
#config = OmegaConf.load("configs/stable-diffusion/txt2img-upscale-clip-encoder-f16-1024.yaml")
|
426 |
+
config = OmegaConf.load("configs/stable-diffusion/txt2img-v2-clip-encoder-improved_aesthetics-256.yaml")
|
427 |
+
datamod = WebDataModuleFromConfig(**config["data"]["params"])
|
428 |
+
dataloader = datamod.train_dataloader()
|
429 |
+
|
430 |
+
for batch in dataloader:
|
431 |
+
print(batch.keys())
|
432 |
+
print(batch["jpg"].shape)
|
433 |
+
break
|
434 |
+
|
435 |
+
|
436 |
+
def example03():
|
437 |
+
# improved aesthetics
|
438 |
+
tars = "pipe:aws s3 cp s3://s-laion/improved-aesthetics-laion-2B-en-subsets/aesthetics_tars/{000000..060207}.tar -"
|
439 |
+
dataset = wds.WebDataset(tars)
|
440 |
+
|
441 |
+
def filter_keys(x):
|
442 |
+
try:
|
443 |
+
return ("jpg" in x) and ("txt" in x)
|
444 |
+
except Exception:
|
445 |
+
return False
|
446 |
+
|
447 |
+
def filter_size(x):
|
448 |
+
try:
|
449 |
+
return x['json']['original_width'] >= 512 and x['json']['original_height'] >= 512
|
450 |
+
except Exception:
|
451 |
+
return False
|
452 |
+
|
453 |
+
def filter_watermark(x):
|
454 |
+
try:
|
455 |
+
return x['json']['pwatermark'] < 0.5
|
456 |
+
except Exception:
|
457 |
+
return False
|
458 |
+
|
459 |
+
dataset = (dataset
|
460 |
+
.select(filter_keys)
|
461 |
+
.decode('pil', handler=wds.warn_and_continue))
|
462 |
+
n_save = 20
|
463 |
+
n_total = 0
|
464 |
+
n_large = 0
|
465 |
+
n_large_nowm = 0
|
466 |
+
for i, example in enumerate(dataset):
|
467 |
+
n_total += 1
|
468 |
+
if filter_size(example):
|
469 |
+
n_large += 1
|
470 |
+
if filter_watermark(example):
|
471 |
+
n_large_nowm += 1
|
472 |
+
if n_large_nowm < n_save+1:
|
473 |
+
image = example["jpg"]
|
474 |
+
image.save(os.path.join("tmp", f"{n_large_nowm-1:06}.png"))
|
475 |
+
|
476 |
+
if i%500 == 0:
|
477 |
+
print(i)
|
478 |
+
print(f"Large: {n_large}/{n_total} | {n_large/n_total*100:.2f}%")
|
479 |
+
if n_large > 0:
|
480 |
+
print(f"No Watermark: {n_large_nowm}/{n_large} | {n_large_nowm/n_large*100:.2f}%")
|
481 |
+
|
482 |
+
|
483 |
+
|
484 |
+
def example04():
|
485 |
+
# improved aesthetics
|
486 |
+
for i_shard in range(60208)[::-1]:
|
487 |
+
print(i_shard)
|
488 |
+
tars = "pipe:aws s3 cp s3://s-laion/improved-aesthetics-laion-2B-en-subsets/aesthetics_tars/{:06}.tar -".format(i_shard)
|
489 |
+
dataset = wds.WebDataset(tars)
|
490 |
+
|
491 |
+
def filter_keys(x):
|
492 |
+
try:
|
493 |
+
return ("jpg" in x) and ("txt" in x)
|
494 |
+
except Exception:
|
495 |
+
return False
|
496 |
+
|
497 |
+
def filter_size(x):
|
498 |
+
try:
|
499 |
+
return x['json']['original_width'] >= 512 and x['json']['original_height'] >= 512
|
500 |
+
except Exception:
|
501 |
+
return False
|
502 |
+
|
503 |
+
dataset = (dataset
|
504 |
+
.select(filter_keys)
|
505 |
+
.decode('pil', handler=wds.warn_and_continue))
|
506 |
+
try:
|
507 |
+
example = next(iter(dataset))
|
508 |
+
except Exception:
|
509 |
+
print(f"Error @ {i_shard}")
|
510 |
+
|
511 |
+
|
512 |
+
if __name__ == "__main__":
|
513 |
+
#example01()
|
514 |
+
#example02()
|
515 |
+
example03()
|
516 |
+
#example04()
|
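For context on the metadata that the filters above read, here is a tiny self-contained sketch; the sample dict and the keep() helper are hypothetical and only mirror the filter_keys/filter_size/filter_watermark logic from example03 (LAION json records carry original_width, original_height and a pwatermark score):

# hypothetical LAION-style sample as it would appear after wds decoding
sample = {"jpg": object(), "txt": b"a caption",
          "json": {"original_width": 1024, "original_height": 768, "pwatermark": 0.12}}

def keep(x, min_size=512, max_pwatermark=0.5):
    # combines the key, size and watermark checks used above
    try:
        return ("jpg" in x and "txt" in x
                and x["json"]["original_width"] >= min_size
                and x["json"]["original_height"] >= min_size
                and x["json"]["pwatermark"] < max_pwatermark)
    except Exception:
        return False

print(keep(sample))  # True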
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/ldm/data/lsun.py
ADDED
@@ -0,0 +1,92 @@
import os
import numpy as np
import PIL
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms


class LSUNBase(Dataset):
    def __init__(self,
                 txt_file,
                 data_root,
                 size=None,
                 interpolation="bicubic",
                 flip_p=0.5
                 ):
        self.data_paths = txt_file
        self.data_root = data_root
        with open(self.data_paths, "r") as f:
            self.image_paths = f.read().splitlines()
        self._length = len(self.image_paths)
        self.labels = {
            "relative_file_path_": [l for l in self.image_paths],
            "file_path_": [os.path.join(self.data_root, l)
                           for l in self.image_paths],
        }

        self.size = size
        self.interpolation = {"linear": PIL.Image.LINEAR,
                              "bilinear": PIL.Image.BILINEAR,
                              "bicubic": PIL.Image.BICUBIC,
                              "lanczos": PIL.Image.LANCZOS,
                              }[interpolation]
        self.flip = transforms.RandomHorizontalFlip(p=flip_p)

    def __len__(self):
        return self._length

    def __getitem__(self, i):
        example = dict((k, self.labels[k][i]) for k in self.labels)
        image = Image.open(example["file_path_"])
        if not image.mode == "RGB":
            image = image.convert("RGB")

        # default to score-sde preprocessing
        img = np.array(image).astype(np.uint8)
        crop = min(img.shape[0], img.shape[1])
        h, w, = img.shape[0], img.shape[1]
        img = img[(h - crop) // 2:(h + crop) // 2,
                  (w - crop) // 2:(w + crop) // 2]

        image = Image.fromarray(img)
        if self.size is not None:
            image = image.resize((self.size, self.size), resample=self.interpolation)

        image = self.flip(image)
        image = np.array(image).astype(np.uint8)
        example["image"] = (image / 127.5 - 1.0).astype(np.float32)
        return example


class LSUNChurchesTrain(LSUNBase):
    def __init__(self, **kwargs):
        super().__init__(txt_file="data/lsun/church_outdoor_train.txt", data_root="data/lsun/churches", **kwargs)


class LSUNChurchesValidation(LSUNBase):
    def __init__(self, flip_p=0., **kwargs):
        super().__init__(txt_file="data/lsun/church_outdoor_val.txt", data_root="data/lsun/churches",
                         flip_p=flip_p, **kwargs)


class LSUNBedroomsTrain(LSUNBase):
    def __init__(self, **kwargs):
        super().__init__(txt_file="data/lsun/bedrooms_train.txt", data_root="data/lsun/bedrooms", **kwargs)


class LSUNBedroomsValidation(LSUNBase):
    def __init__(self, flip_p=0.0, **kwargs):
        super().__init__(txt_file="data/lsun/bedrooms_val.txt", data_root="data/lsun/bedrooms",
                         flip_p=flip_p, **kwargs)


class LSUNCatsTrain(LSUNBase):
    def __init__(self, **kwargs):
        super().__init__(txt_file="data/lsun/cat_train.txt", data_root="data/lsun/cats", **kwargs)


class LSUNCatsValidation(LSUNBase):
    def __init__(self, flip_p=0., **kwargs):
        super().__init__(txt_file="data/lsun/cat_val.txt", data_root="data/lsun/cats",
                         flip_p=flip_p, **kwargs)
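A minimal usage sketch for the LSUN wrappers above; it assumes the split file data/lsun/church_outdoor_val.txt and the images it lists are present locally:

from ldm.data.lsun import LSUNChurchesValidation

dataset = LSUNChurchesValidation(size=256)
item = dataset[0]
# center-cropped, resized image as float32 HWC array scaled to [-1, 1]
print(item["image"].shape, item["image"].dtype)
print(item["relative_file_path_"])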
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/ldm/lr_scheduler.py
ADDED
@@ -0,0 +1,98 @@
import numpy as np


class LambdaWarmUpCosineScheduler:
    """
    note: use with a base_lr of 1.0
    """
    def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0):
        self.lr_warm_up_steps = warm_up_steps
        self.lr_start = lr_start
        self.lr_min = lr_min
        self.lr_max = lr_max
        self.lr_max_decay_steps = max_decay_steps
        self.last_lr = 0.
        self.verbosity_interval = verbosity_interval

    def schedule(self, n, **kwargs):
        if self.verbosity_interval > 0:
            if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_lr}")
        if n < self.lr_warm_up_steps:
            lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start
            self.last_lr = lr
            return lr
        else:
            t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps)
            t = min(t, 1.0)
            lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * (
                    1 + np.cos(t * np.pi))
            self.last_lr = lr
            return lr

    def __call__(self, n, **kwargs):
        return self.schedule(n, **kwargs)


class LambdaWarmUpCosineScheduler2:
    """
    supports repeated iterations, configurable via lists
    note: use with a base_lr of 1.0.
    """
    def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0):
        assert len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths)
        self.lr_warm_up_steps = warm_up_steps
        self.f_start = f_start
        self.f_min = f_min
        self.f_max = f_max
        self.cycle_lengths = cycle_lengths
        self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths))
        self.last_f = 0.
        self.verbosity_interval = verbosity_interval

    def find_in_interval(self, n):
        interval = 0
        for cl in self.cum_cycles[1:]:
            if n <= cl:
                return interval
            interval += 1

    def schedule(self, n, **kwargs):
        cycle = self.find_in_interval(n)
        n = n - self.cum_cycles[cycle]
        if self.verbosity_interval > 0:
            if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, "
                                                       f"current cycle {cycle}")
        if n < self.lr_warm_up_steps[cycle]:
            f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
            self.last_f = f
            return f
        else:
            t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle])
            t = min(t, 1.0)
            f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * (
                    1 + np.cos(t * np.pi))
            self.last_f = f
            return f

    def __call__(self, n, **kwargs):
        return self.schedule(n, **kwargs)


class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2):

    def schedule(self, n, **kwargs):
        cycle = self.find_in_interval(n)
        n = n - self.cum_cycles[cycle]
        if self.verbosity_interval > 0:
            if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, "
                                                       f"current cycle {cycle}")

        if n < self.lr_warm_up_steps[cycle]:
            f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
            self.last_f = f
            return f
        else:
            f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n) / (self.cycle_lengths[cycle])
            self.last_f = f
            return f
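A minimal sketch of how these lambda schedulers are typically wired into training, mirroring the configure_optimizers pattern in ldm/models/autoencoder.py; the constructor values below are illustrative placeholders, not taken from a specific config:

import torch
from torch.optim.lr_scheduler import LambdaLR

from ldm.lr_scheduler import LambdaLinearScheduler

model = torch.nn.Linear(4, 4)                         # placeholder module
opt = torch.optim.Adam(model.parameters(), lr=1e-4)   # base lr; schedule returns a multiplier

sched_fn = LambdaLinearScheduler(warm_up_steps=[100], f_min=[1.0], f_max=[1.0],
                                 f_start=[1e-6], cycle_lengths=[10000])
scheduler = LambdaLR(opt, lr_lambda=sched_fn.schedule)

for step in range(3):
    opt.step()
    scheduler.step()  # advances n; lr = base_lr * sched_fn.schedule(n)
    print(scheduler.get_last_lr())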
docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion/ldm/models/autoencoder.py
ADDED
@@ -0,0 +1,449 @@
import torch

from lightning_utilities import module_available

if module_available("lightning"):
    import lightning.pytorch as pl
elif module_available("pytorch_lightning"):
    import pytorch_lightning as pl
import torch.nn.functional as F
from contextlib import contextmanager

from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer

from ldm.modules.diffusionmodules.model import Encoder, Decoder
from ldm.modules.distributions.distributions import DiagonalGaussianDistribution

from ldm.util import instantiate_from_config

# The methods below reference np, version, LambdaLR and LitEma; these imports do not
# appear in the listing above and are assumed here (standard locations).
import numpy as np
from packaging import version
from torch.optim.lr_scheduler import LambdaLR
from ldm.modules.ema import LitEma


class VQModel(pl.LightningModule):
    def __init__(self,
                 ddconfig,
                 lossconfig,
                 n_embed,
                 embed_dim,
                 ckpt_path=None,
                 ignore_keys=[],
                 image_key="image",
                 colorize_nlabels=None,
                 monitor=None,
                 batch_resize_range=None,
                 scheduler_config=None,
                 lr_g_factor=1.0,
                 remap=None,
                 sane_index_shape=False, # tell vector quantizer to return indices as bhw
                 use_ema=False
                 ):
        super().__init__()
        self.embed_dim = embed_dim
        self.n_embed = n_embed
        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25,
                                        remap=remap,
                                        sane_index_shape=sane_index_shape)
        self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
        if colorize_nlabels is not None:
            assert type(colorize_nlabels)==int
            self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
        if monitor is not None:
            self.monitor = monitor
        self.batch_resize_range = batch_resize_range
        if self.batch_resize_range is not None:
            print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.")

        self.use_ema = use_ema
        if self.use_ema:
            self.model_ema = LitEma(self)
            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")

        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
        self.scheduler_config = scheduler_config
        self.lr_g_factor = lr_g_factor

    @contextmanager
    def ema_scope(self, context=None):
        if self.use_ema:
            self.model_ema.store(self.parameters())
            self.model_ema.copy_to(self)
            if context is not None:
                print(f"{context}: Switched to EMA weights")
        try:
            yield None
        finally:
            if self.use_ema:
                self.model_ema.restore(self.parameters())
                if context is not None:
                    print(f"{context}: Restored training weights")

    def init_from_ckpt(self, path, ignore_keys=list()):
        sd = torch.load(path, map_location="cpu")["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        missing, unexpected = self.load_state_dict(sd, strict=False)
        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
        if len(missing) > 0:
            print(f"Missing Keys: {missing}")
            print(f"Unexpected Keys: {unexpected}")

    def on_train_batch_end(self, *args, **kwargs):
        if self.use_ema:
            self.model_ema(self)

    def encode(self, x):
        h = self.encoder(x)
        h = self.quant_conv(h)
        quant, emb_loss, info = self.quantize(h)
        return quant, emb_loss, info

    def encode_to_prequant(self, x):
        h = self.encoder(x)
        h = self.quant_conv(h)
        return h

    def decode(self, quant):
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant)
        return dec

    def decode_code(self, code_b):
        quant_b = self.quantize.embed_code(code_b)
        dec = self.decode(quant_b)
        return dec

    def forward(self, input, return_pred_indices=False):
        quant, diff, (_,_,ind) = self.encode(input)
        dec = self.decode(quant)
        if return_pred_indices:
            return dec, diff, ind
        return dec, diff

    def get_input(self, batch, k):
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
        if self.batch_resize_range is not None:
            lower_size = self.batch_resize_range[0]
            upper_size = self.batch_resize_range[1]
            if self.global_step <= 4:
                # do the first few batches with max size to avoid later oom
                new_resize = upper_size
            else:
                new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16))
            if new_resize != x.shape[2]:
                x = F.interpolate(x, size=new_resize, mode="bicubic")
            x = x.detach()
        return x

    def training_step(self, batch, batch_idx, optimizer_idx):
        # https://github.com/pytorch/pytorch/issues/37142
        # try not to fool the heuristics
        x = self.get_input(batch, self.image_key)
        xrec, qloss, ind = self(x, return_pred_indices=True)

        if optimizer_idx == 0:
            # autoencode
            aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
                                            last_layer=self.get_last_layer(), split="train",
                                            predicted_indices=ind)

            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return aeloss

        if optimizer_idx == 1:
            # discriminator
            discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
                                                last_layer=self.get_last_layer(), split="train")
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return discloss

    def validation_step(self, batch, batch_idx):
        log_dict = self._validation_step(batch, batch_idx)
        with self.ema_scope():
            log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema")
        return log_dict

    def _validation_step(self, batch, batch_idx, suffix=""):
        x = self.get_input(batch, self.image_key)
        xrec, qloss, ind = self(x, return_pred_indices=True)
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0,
                                        self.global_step,
                                        last_layer=self.get_last_layer(),
                                        split="val"+suffix,
                                        predicted_indices=ind
                                        )

        discloss, log_dict_disc = self.loss(qloss, x, xrec, 1,
                                            self.global_step,
                                            last_layer=self.get_last_layer(),
                                            split="val"+suffix,
                                            predicted_indices=ind
                                            )
        rec_loss = log_dict_ae[f"val{suffix}/rec_loss"]
        self.log(f"val{suffix}/rec_loss", rec_loss,
                 prog_bar=True, logger=True, on_step=False, on_epoch=True)
        self.log(f"val{suffix}/aeloss", aeloss,
                 prog_bar=True, logger=True, on_step=False, on_epoch=True)
        if version.parse(pl.__version__) >= version.parse('1.4.0'):
            del log_dict_ae[f"val{suffix}/rec_loss"]
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        return self.log_dict

    def configure_optimizers(self):
        lr_d = self.learning_rate
        lr_g = self.lr_g_factor*self.learning_rate
        print("lr_d", lr_d)
        print("lr_g", lr_g)
        opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
                                  list(self.decoder.parameters())+
                                  list(self.quantize.parameters())+
                                  list(self.quant_conv.parameters())+
                                  list(self.post_quant_conv.parameters()),
                                  lr=lr_g, betas=(0.5, 0.9))
        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
                                    lr=lr_d, betas=(0.5, 0.9))

        if self.scheduler_config is not None:
            scheduler = instantiate_from_config(self.scheduler_config)

            print("Setting up LambdaLR scheduler...")
            scheduler = [
                {
                    'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule),
                    'interval': 'step',
                    'frequency': 1
                },
                {
                    'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule),
                    'interval': 'step',
                    'frequency': 1
                },
            ]
            return [opt_ae, opt_disc], scheduler
        return [opt_ae, opt_disc], []

    def get_last_layer(self):
        return self.decoder.conv_out.weight

    def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs):
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        if only_inputs:
            log["inputs"] = x
            return log
        xrec, _ = self(x)
        if x.shape[1] > 3:
            # colorize with random projection
            assert xrec.shape[1] > 3
            x = self.to_rgb(x)
            xrec = self.to_rgb(xrec)
        log["inputs"] = x
        log["reconstructions"] = xrec
        if plot_ema:
            with self.ema_scope():
                xrec_ema, _ = self(x)
                if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema)
                log["reconstructions_ema"] = xrec_ema
        return log

    def to_rgb(self, x):
        assert self.image_key == "segmentation"
        if not hasattr(self, "colorize"):
            self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
        x = F.conv2d(x, weight=self.colorize)
        x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
        return x


class VQModelInterface(VQModel):
    def __init__(self, embed_dim, *args, **kwargs):
        super().__init__(embed_dim=embed_dim, *args, **kwargs)
        self.embed_dim = embed_dim

    def encode(self, x):
        h = self.encoder(x)
        h = self.quant_conv(h)
        return h

    def decode(self, h, force_not_quantize=False):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant)
        return dec


class AutoencoderKL(pl.LightningModule):
    def __init__(self,
                 ddconfig,
                 lossconfig,
                 embed_dim,
                 ckpt_path=None,
                 ignore_keys=[],
                 image_key="image",
                 colorize_nlabels=None,
                 monitor=None
                 ):
        super().__init__()
        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        assert ddconfig["double_z"]
        self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
        self.embed_dim = embed_dim
        if colorize_nlabels is not None:
            assert type(colorize_nlabels)==int
            self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
        if monitor is not None:
            self.monitor = monitor
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)

    def init_from_ckpt(self, path, ignore_keys=list()):
        sd = torch.load(path, map_location="cpu")["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        self.load_state_dict(sd, strict=False)
        print(f"Restored from {path}")

    def encode(self, x):
        h = self.encoder(x)
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)
        return posterior

    def decode(self, z):
        z = self.post_quant_conv(z)
        dec = self.decoder(z)
        return dec

    def forward(self, input, sample_posterior=True):
        posterior = self.encode(input)
        if sample_posterior:
            z = posterior.sample()
        else:
            z = posterior.mode()
        dec = self.decode(z)
        return dec, posterior

    def get_input(self, batch, k):
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
        return x

    def training_step(self, batch, batch_idx, optimizer_idx):
        inputs = self.get_input(batch, self.image_key)
        reconstructions, posterior = self(inputs)

        if optimizer_idx == 0:
            # train encoder+decoder+logvar
            aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
                                            last_layer=self.get_last_layer(), split="train")
            self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
            return aeloss

        if optimizer_idx == 1:
            # train the discriminator
            discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
                                                last_layer=self.get_last_layer(), split="train")

            self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
            return discloss

    def validation_step(self, batch, batch_idx):
        inputs = self.get_input(batch, self.image_key)
        reconstructions, posterior = self(inputs)
        aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,
                                        last_layer=self.get_last_layer(), split="val")

        discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,
                                            last_layer=self.get_last_layer(), split="val")

        self.log("val/rec_loss", log_dict_ae["val/rec_loss"])
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        return self.log_dict

    def configure_optimizers(self):
        lr = self.learning_rate
        opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
                                  list(self.decoder.parameters())+
                                  list(self.quant_conv.parameters())+
                                  list(self.post_quant_conv.parameters()),
                                  lr=lr, betas=(0.5, 0.9))
        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
                                    lr=lr, betas=(0.5, 0.9))
        return [opt_ae, opt_disc], []

    def get_last_layer(self):
        return self.decoder.conv_out.weight

    @torch.no_grad()
    def log_images(self, batch, only_inputs=False, **kwargs):
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        if not only_inputs:
            xrec, posterior = self(x)
            if x.shape[1] > 3:
                # colorize with random projection
                assert xrec.shape[1] > 3
                x = self.to_rgb(x)
                xrec = self.to_rgb(xrec)
            log["samples"] = self.decode(torch.randn_like(posterior.sample()))
            log["reconstructions"] = xrec
        log["inputs"] = x
        return log

    def to_rgb(self, x):
        assert self.image_key == "segmentation"
        if not hasattr(self, "colorize"):
            self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
        x = F.conv2d(x, weight=self.colorize)
        x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
        return x


class IdentityFirstStage(torch.nn.Module):
    def __init__(self, *args, vq_interface=False, **kwargs):
        self.vq_interface = vq_interface  # TODO: Should be true by default but check to not break older stuff
        super().__init__()

    def encode(self, x, *args, **kwargs):
        return x

    def decode(self, x, *args, **kwargs):
        return x

    def quantize(self, x, *args, **kwargs):
        if self.vq_interface:
            return x, None, [None, None, None]
        return x

    def forward(self, x, *args, **kwargs):
        return x