Update app.py
app.py CHANGED
@@ -12,12 +12,14 @@ from diffusion_model.sampler.ddim import DDIM
 from diffusion_model.models.latent_diffusion_model import LatentDiffusionModel
 from diffusion_model.network.unet import Unet
 from diffusion_model.network.unet_wrapper import UnetWrapper
+from huggingface_hub import hf_hub_download
 
 # import spaces #[uncomment to use ZeroGPU]
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 loader = Loader(device)
 repo_id = "JuyeopDang/KoFace-Diffusion"
+CONFIG_PATH = 'configs/composite_config.yaml'
 
 if torch.cuda.is_available():
     torch_dtype = torch.float16
@@ -34,9 +36,6 @@ def load_model_from_HF(model, repo_id, filename, is_ema=False):
     return model
 
 if __name__ == "__main__":
-    from huggingface_hub import hf_hub_download
-    CONFIG_PATH = 'configs/composite_config.yaml'
-
     vae = VariationalAutoEncoder(CONFIG_PATH)
     sampler = DDIM(CONFIG_PATH)
     clip = KoCLIPWrapper()
@@ -47,5 +46,21 @@ if __name__ == "__main__":
     vae = load_model_from_HF(vae, repo_id, "composite_epoch2472.pth", False)
     clip = load_model_from_HF(clip, repo_id, "asian-composite-fine-tuned-koclip.pth", True)
     dm = load_model_from_HF(dm, repo_id, "asian-composite-clip-ldm.pth", True)
+
+    def generate_image(y, gamma, dm):
+        images = dm.sample(2, y = y, gamma = gamma)
+        images = images.permute(0, 2, 3, 1)
+        if type(images) is torch.Tensor:
+            images = images.detach().cpu().numpy()
+        images = np.clip(images / 2 + 0.5, 0, 1)
+        return im.fromarray((images[0] * 255).astype(np.uint8))
+
+    demo = gr.Interface(
+        generate_image,
+        inputs=["textbox", gr.Slider(0, 10)],
+        outputs=["image"],
+    )
 
-
+    demo.launch()
+
+
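The change moves hf_hub_download and CONFIG_PATH to module level, but the body of load_model_from_HF(model, repo_id, filename, is_ema=False) is not part of this diff. For orientation only, a minimal sketch of how such a helper typically pairs hf_hub_download with torch.load; the checkpoint layout and EMA key name below are assumptions, not the Space's actual code:

    import torch
    from huggingface_hub import hf_hub_download

    def load_model_from_HF_sketch(model, repo_id, filename, is_ema=False):
        # hf_hub_download fetches the file from the Hub (or the local cache)
        # and returns the path to the downloaded weights.
        weights_path = hf_hub_download(repo_id=repo_id, filename=filename)
        checkpoint = torch.load(weights_path, map_location="cpu")
        # Hypothetical layout: an EMA copy of the weights stored under an "ema" key.
        if is_ema and isinstance(checkpoint, dict) and "ema" in checkpoint:
            checkpoint = checkpoint["ema"]
        model.load_state_dict(checkpoint)
        return model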
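In the new main block, generate_image(y, gamma, dm) takes the diffusion model as a third parameter, while gr.Interface declares only two inputs (a textbox for y and a slider for gamma), so Gradio would call the function with two arguments. A minimal sketch of one way to bind the already-loaded dm with functools.partial so the UI only supplies the two input values; it reuses the names from the diff, but the wrapper itself is an assumption, not part of this commit:

    from functools import partial
    import gradio as gr

    # Bind the loaded latent diffusion model so the UI only has to provide y and gamma.
    demo = gr.Interface(
        partial(generate_image, dm=dm),  # generate_image(y, gamma, dm) with dm pre-filled
        inputs=[gr.Textbox(label="prompt (y)"), gr.Slider(0, 10, label="gamma")],
        outputs=gr.Image(type="pil"),    # generate_image returns a PIL image
    )
    demo.launch()

The post-processing in generate_image implies dm.sample returns tensors in [-1, 1]: images / 2 + 0.5 rescales them to [0, 1] before the uint8 conversion, so -1.0 maps to 0 and 1.0 maps to 255.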