import importlib
import torch
import json
import math
import os
import numpy as np

import torch.nn.functional as F

def new_module(config):
    """Instantiate a module from a config dict or from a path to a JSON config file.

    The config must contain:
        "target": dotted import path of the class to instantiate
        "params": dict of keyword arguments passed to the constructor (optional)
    """
    # Accept a path to a JSON file as well as an already-parsed dict.
    if isinstance(config, str):
        with open(config, 'r') as file:
            config = json.load(file)
    assert isinstance(config, dict)
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    # Split "package.module.ClassName" into module path and class name, import the
    # module (relative targets such as "..vqvae" resolve against this package),
    # and build the object with the given params.
    module_name, cls_name = config["target"].rsplit(".", 1)
    module = importlib.import_module(module_name, package=__package__)
    model = getattr(module, cls_name)(**config.get("params", dict()))

    return model

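# Usage sketch (illustrative only; `torch.nn.Linear` stands in for any importable
# "module.Class" target, including the relative "..vqvae.*" targets used below):
#
#     cfg = {"target": "torch.nn.Linear",
#            "params": {"in_features": 8, "out_features": 4}}
#     layer = new_module(cfg)  # same as torch.nn.Linear(in_features=8, out_features=4)
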
def load_ckpt(model, path):
    """Load weights from a checkpoint that stores the state dict under the
    'module' key (e.g. DeepSpeed-style checkpoints)."""
    sd = torch.load(path, map_location="cpu")['module']
    # strict=False: keys missing from the checkpoint (or unexpected ones) are
    # ignored instead of raising, so partially matching checkpoints still load.
    model.load_state_dict(sd, strict=False)
    return model

def load_default_HVQVAE():
    """Build the default 3-level hierarchical VQ-VAE: 256-dim embeddings, a
    20,000-entry EMA codebook, and one decoder per level."""
    config = {
        "target": "..vqvae.HVQVAE",
        "params": {
            "levels": 3,
            "embedding_dim": 256,
            "codebook_scale": 1,
            "down_sampler_configs": [
                {
                    "target": "..vqvae.ResidualDownSample",
                    "params": {
                        "in_channels": 256
                    }
                },
                {
                    "target": "..vqvae.ResidualDownSample",
                    "params": {
                        "in_channels": 256
                    }
                }
            ],
            "enc_config": {
                    "target": "..vqvae.Encoder",
                    "params": {
                        "num_res_blocks": 2,
                        "channels_mult": [1,2,4]
                    }
            },
            "quantize_config": {
                    "target": "..vqvae.VectorQuantizeEMA",
                    "params": {
                        "hidden_dim": 256,
                        "embedding_dim": 256,
                        "n_embed": 20000,
                        "training_loc": False
                    }
            },
            "dec_configs": [
                {
                    "target": "..vqvae.Decoder",
                    "params": {
                        "channels_mult": [1,1,1,2,4]
                    }
                },
                {
                    "target": "..vqvae.Decoder",
                    "params": {
                        "channels_mult": [1,1,2,4]
                    }
                },
                {
                    "target": "..vqvae.Decoder",
                    "params": {
                        "channels_mult": [1,2,4]
                    }
                }
            ]
        }
    }
    return new_module(config)


if __name__ == '__main__':
    pass
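    # Minimal usage sketch.  The relative "..vqvae" targets only resolve when this
    # file is imported as part of its package (e.g. run via `python -m <package>.<module>`);
    # HVQVAE_CKPT is a hypothetical environment variable used here purely for illustration.
    model = load_default_HVQVAE()
    ckpt_path = os.environ.get("HVQVAE_CKPT")
    if ckpt_path and os.path.isfile(ckpt_path):
        model = load_ckpt(model, ckpt_path)
    model.eval()
    n_params = sum(p.numel() for p in model.parameters())
    print(f"HVQVAE ready: {n_params:,} parameters")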