A = None  # unused module-level placeholder

e_sd_pt = "ko.pth"  # encoder state-dict path used by filter_state_dict
d_sd_pt = "ok.pth"  # decoder state-dict path used by filter_state_dict

import torch as t, torch.nn as nn, torch.nn.functional as F
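
# Overview: a tiny convolutional autoencoder in the spirit of TAESD. E maps RGB
# images to a low-channel latent with 8x spatial downsampling, D maps latents back
# to RGB with 8x upsampling, and M wraps a frozen encoder/decoder pair, guessing
# the latent channel count from the checkpoint filename.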

def C(n_in, n_out, **kwargs):
    """3x3 convolution with padding 1."""
    return nn.Conv2d(n_in, n_out, 3, padding=1, **kwargs)

class Clamp(nn.Module):
    """Soft-clamp activations to roughly [-3, 3] via a scaled tanh."""
    def forward(self, x):
        return t.tanh(x / 3) * 3

class B(nn.Module):
    """Residual block: three 3x3 convs plus a 1x1-projected (or identity) skip."""
    def __init__(self, n_in, n_out):
        super().__init__()
        self.conv = nn.Sequential(C(n_in, n_out), nn.ReLU(), C(n_out, n_out), nn.ReLU(), C(n_out, n_out))
        self.skip = nn.Conv2d(n_in, n_out, 1, bias=False) if n_in != n_out else nn.Identity()
        self.fuse = nn.ReLU()

    def forward(self, x):
        return self.fuse(self.conv(x) + self.skip(x))

def E(latent_channels=4):
    """Encoder: three stride-2 stages (8x spatial downsampling) ending in `latent_channels`."""
    return nn.Sequential(
        C(3, 64), B(64, 64),
        C(64, 64, stride=2, bias=False), B(64, 64), B(64, 64), B(64, 64),
        C(64, 64, stride=2, bias=False), B(64, 64), B(64, 64), B(64, 64),
        C(64, 64, stride=2, bias=False), B(64, 64), B(64, 64), B(64, 64),
        C(64, latent_channels),
    )

def D(latent_channels=16):
    """Decoder: three 2x upsampling stages (8x total) from `latent_channels` back to RGB."""
    return nn.Sequential(
        Clamp(),
        C(latent_channels, 48), nn.ReLU(), B(48, 48), B(48, 48),
        nn.Upsample(scale_factor=2), C(48, 48, bias=False), B(48, 48), B(48, 48),
        nn.Upsample(scale_factor=2), C(48, 48, bias=False), B(48, 48),
        nn.Upsample(scale_factor=2), C(48, 48, bias=False), B(48, 48),
        C(48, 3),
    )

class M(nn.Module):
    """Frozen encoder/decoder pair with latent scaling helpers."""
    lm, ls = 3, 0.5  # latent magnitude and shift used by sl/ul

    def __init__(s, ep="encoder.pth", dp="decoder.pth", lc=None):
        super().__init__()
        if lc is None: lc = s.glc(str(ep))
        s.e, s.d = E(lc), D(lc)

        def f(sd, mod, pfx):
            # Strip the prefix and keep only keys whose shapes match the target module.
            mod_sd = mod.state_dict()
            f_sd = {k.removeprefix(pfx): v for k, v in sd.items()
                    if k.removeprefix(pfx) in mod_sd and v.size() == mod_sd[k.removeprefix(pfx)].size()}
            mod.load_state_dict(f_sd, strict=False)

        if ep: f(t.load(ep, map_location="cpu", weights_only=True), s.e, "encoder.")
        if dp: f(t.load(dp, map_location="cpu", weights_only=True), s.d, "decoder.")
        s.e.requires_grad_(False)
        s.d.requires_grad_(False)

    def glc(s, ep):
        # Guess the latent channel count from the checkpoint filename.
        return 16 if "taef1" in ep or "taesd3" in ep else 4

    @staticmethod
    def sl(x):
        """Scale raw latents from roughly [-lm, lm] into [0, 1]."""
        return x.div(2 * M.lm).add(M.ls).clamp(0, 1)

    @staticmethod
    def ul(x):
        """Map [0, 1] latents back to roughly [-lm, lm] (inverse of sl, up to clamping)."""
        return x.sub(M.ls).mul(2 * M.lm)

    def forward(s, x, rl=False):
        l = s.e(x)   # encode once and reuse the latent for decoding
        o = s.d(l)
        return (o.clamp(0, 1), l) if rl else o.clamp(0, 1)
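
# Example usage of M (a sketch; assumes "encoder.pth" / "decoder.pth" exist and hold
# "encoder."- / "decoder."-prefixed weights matching E(lc) / D(lc)):
#   m = M("encoder.pth", "decoder.pth")
#   img = t.rand(1, 3, 256, 256)    # RGB batch in [0, 1]
#   recon, lat = m(img, rl=True)    # lat: (1, lc, 32, 32) after 8x downsampling
#   lat01 = M.sl(lat)               # latents squashed into [0, 1]
#   lat_back = M.ul(lat01)          # approximate inverse (exact when |lat| <= M.lm)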

def filter_state_dict(model, name):
    """Load the checkpoint for `name` ("E" or "D") and keep only shape-matching keys."""
    state_dict = t.load(e_sd_pt if name == "E" else d_sd_pt, map_location="cpu", weights_only=True)
    prefix = "encoder." if name == "E" else "decoder."
    mod_sd = model.state_dict()
    return {k.removeprefix(prefix): v for k, v in state_dict.items()
            if k.removeprefix(prefix) in mod_sd and v.size() == mod_sd[k.removeprefix(prefix)].size()}

def _load(name, dtype=t.bfloat16):
    """Build a frozen 16-latent-channel encoder ("E") or decoder ("D") and load its filtered weights."""
    model = E(16) if name == "E" else D(16)
    model.load_state_dict(filter_state_dict(model, name), strict=False)
    model.requires_grad_(False).to(dtype=dtype)
    return model
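
# Minimal smoke test (a sketch; assumes e_sd_pt / d_sd_pt point at checkpoints with
# "encoder." / "decoder."-prefixed keys and 16 latent channels):
if __name__ == "__main__":
    enc, dec = _load("E", dtype=t.float32), _load("D", dtype=t.float32)
    x = t.rand(1, 3, 64, 64)        # RGB batch in [0, 1]
    lat = enc(x)                    # (1, 16, 8, 8) after 8x downsampling
    out = dec(lat).clamp(0, 1)      # (1, 3, 64, 64) reconstruction
    print(lat.shape, out.shape)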