# NOTE(review): the lines below are non-code residue from the Hugging Face
# Spaces page capture, kept as a comment so the file remains valid Python:
#   Spaces: Running
import gradio as gr
import numpy as np
import random
import torch

from helper.painter import Painter
from helper.trainer import Trainer
from helper.data_generator import DataGenerator
from helper.loader import Loader
from helper.cond_encoder import CLIPEncoder
from auto_encoder.models.variational_auto_encoder import VariationalAutoEncoder
from clip.models.ko_clip import KoCLIPWrapper
from diffusion_model.sampler.ddim import DDIM
from diffusion_model.models.latent_diffusion_model import LatentDiffusionModel
from diffusion_model.network.unet import Unet
from diffusion_model.network.unet_wrapper import UnetWrapper

# import spaces  # [uncomment to use ZeroGPU]

# Pick the compute device once and a matching dtype: half precision saves
# memory on GPU; CPU inference needs full float32.
device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

if __name__ == "__main__":
    from huggingface_hub import hf_hub_download

    CONFIG_PATH = 'configs/composite_config.yaml'
    repo_id = "JuyeopDang/KoFace-Diffusion"
    filename = "composite_epoch2472.pth"  # e.g. "pytorch_model.pt" or "model.pt"

    # Build the VAE from its YAML config; weights are loaded further below.
    vae = VariationalAutoEncoder(CONFIG_PATH)

    model_path = None
    try:
        # Download the checkpoint file from the Hugging Face Hub.
        # Passing cache_dir= would control where the file is stored;
        # by default it lands in ~/.cache/huggingface/hub.
        model_path = hf_hub_download(repo_id=repo_id, filename=filename)
        print(f"Model weights downloaded to: {model_path}")
    except Exception as e:
        # Original message was mojibake; repaired to a readable English one.
        print(f"Error while downloading or loading the model: {e}")

    # BUG FIX: the original dereferenced model_path even when the download
    # failed (masking the real error with a NameError) and hard-coded
    # map_location='cuda', which crashes on CPU-only hosts. Guard the load
    # and map the tensors onto the detected device instead.
    if model_path is not None:
        state_dict = torch.load(model_path, map_location=device)
        vae.load_state_dict(state_dict['model_state_dict'])
        print(vae)