import gradio as gr
import numpy as np
import random
import torch
from helper.painter import Painter
from helper.trainer import Trainer
from helper.data_generator import DataGenerator
from helper.loader import Loader
from helper.cond_encoder import CLIPEncoder
from auto_encoder.models.variational_auto_encoder import VariationalAutoEncoder
from clip.models.ko_clip import KoCLIPWrapper
from diffusion_model.sampler.ddim import DDIM
from diffusion_model.models.latent_diffusion_model import LatentDiffusionModel
from diffusion_model.network.unet import Unet
from diffusion_model.network.unet_wrapper import UnetWrapper
# import spaces #[uncomment to use ZeroGPU]
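# A minimal ZeroGPU sketch (assumes this Space is configured for ZeroGPU
# hardware): functions decorated with @spaces.GPU are allocated a GPU only
# for the duration of the call.
#
#   import spaces
#
#   @spaces.GPU
#   def generate(prompt):
#       ...  # GPU-bound inference goes here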
device = "cuda" if torch.cuda.is_available() else "cpu"
if torch.cuda.is_available():
    torch_dtype = torch.float16
else:
    torch_dtype = torch.float32
if __name__ == "__main__":
    from huggingface_hub import hf_hub_download

    CONFIG_PATH = 'configs/composite_config.yaml'
    repo_id = "JuyeopDang/KoFace-Diffusion"
    filename = "composite_epoch2472.pth"  # e.g. "pytorch_model.pt" or "model.pt"
    vae = VariationalAutoEncoder(CONFIG_PATH)
    try:
        # Download the checkpoint file.
        # Passing cache_dir controls where the downloaded file is stored;
        # by default it goes to ~/.cache/huggingface/hub.
        model_path = hf_hub_download(repo_id=repo_id, filename=filename)
        print(f"Model weights downloaded to: {model_path}")
        # map_location=device keeps the load working on CPU-only machines;
        # the checkpoint stores the weights under the 'model_state_dict' key.
        state_dict = torch.load(model_path, map_location=device)
        vae.load_state_dict(state_dict['model_state_dict'])
    except Exception as e:
        print(f"Error while downloading the file or loading the model: {e}")
    print(vae)
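    # A minimal sketch of wiring the loaded VAE into the Gradio import above.
    # The `reconstruct` helper is hypothetical, and the assumption that the
    # VAE's forward pass returns the reconstruction first depends on this
    # repo's VariationalAutoEncoder implementation:
    #
    #   def reconstruct(image):
    #       # HWC uint8 -> NCHW float in [0, 1]
    #       x = torch.from_numpy(image).permute(2, 0, 1).float().unsqueeze(0) / 255.0
    #       with torch.no_grad():
    #           out = vae(x.to(device))[0]
    #       out = out.squeeze(0).permute(1, 2, 0).clamp(0, 1)
    #       return (out * 255).byte().cpu().numpy()
    #
    #   demo = gr.Interface(fn=reconstruct, inputs=gr.Image(), outputs=gr.Image())
    #   demo.launch()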