Update app.py
app.py CHANGED
@@ -1,3 +1,34 @@
+import torch
+import types
+torch.cuda.get_device_capability = lambda *args, **kwargs: (8, 6)
+torch.cuda.get_device_properties = lambda *args, **kwargs: types.SimpleNamespace(name='NVIDIA A10G', major=8, minor=6, total_memory=23836033024, multi_processor_count=80)
+
+import huggingface_hub
+huggingface_hub.snapshot_download(
+    repo_id='camenduru/PASD',
+    allow_patterns=[
+        'pasd/**',
+        'pasd_light/**',
+        'pasd_light_rrdb/**',
+        'pasd_rrdb/**',
+    ],
+    local_dir='PASD/runs',
+    local_dir_use_symlinks=False,
+)
+huggingface_hub.hf_hub_download(
+    repo_id='camenduru/PASD',
+    filename='majicmixRealistic_v6.safetensors',
+    local_dir='PASD/checkpoints/personalized_models',
+    local_dir_use_symlinks=False,
+)
+huggingface_hub.hf_hub_download(
+    repo_id='akhaliq/RetinaFace-R50',
+    filename='RetinaFace-R50.pth',
+    local_dir='PASD/annotator/ckpts',
+    local_dir_use_symlinks=False,
+)
+
+import sys; sys.path.append('./PASD')
 import spaces
 import os
 import datetime
@@ -32,10 +63,10 @@ else:
 from models.pasd.unet_2d_condition import UNet2DConditionModel
 from models.pasd.controlnet import ControlNetModel
 
-pretrained_model_path = "
-ckpt_path = "runs/pasd/checkpoint-100000"
+pretrained_model_path = "runwayml/stable-diffusion-v1-5"
+ckpt_path = "PASD/runs/pasd/checkpoint-100000"
 #dreambooth_lora_path = "checkpoints/personalized_models/toonyou_beta3.safetensors"
-dreambooth_lora_path = "checkpoints/personalized_models/majicmixRealistic_v6.safetensors"
+dreambooth_lora_path = "PASD/checkpoints/personalized_models/majicmixRealistic_v6.safetensors"
 #dreambooth_lora_path = "checkpoints/personalized_models/Realistic_Vision_V5.1.safetensors"
 weight_dtype = torch.float16
 device = "cuda"
@@ -44,7 +75,7 @@ scheduler = UniPCMultistepScheduler.from_pretrained(pretrained_model_path, subfo
 text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder")
 tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer")
 vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae")
-feature_extractor = CLIPImageProcessor.from_pretrained(
+feature_extractor = CLIPImageProcessor.from_pretrained(pretrained_model_path, subfolder="feature_extractor")
 unet = UNet2DConditionModel.from_pretrained(ckpt_path, subfolder="unet")
 controlnet = ControlNetModel.from_pretrained(ckpt_path, subfolder="controlnet")
 vae.requires_grad_(False)
@@ -191,7 +222,7 @@ with gr.Blocks(css=css) as demo:
     """)
     with gr.Row():
        with gr.Column():
-            input_image = gr.Image(type="filepath", sources=["upload"], value="samples/frog.png")
+            input_image = gr.Image(type="filepath", sources=["upload"], value="PASD/samples/frog.png")
             prompt_in = gr.Textbox(label="Prompt", value="Frog")
             with gr.Accordion(label="Advanced settings", open=False):
                 added_prompt = gr.Textbox(label="Added Prompt", value='clean, high-resolution, 8k, best quality, masterpiece')
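
Note: the torch.cuda monkey-patch at the top of the new header hard-codes the answers to two device queries, presumably so that code inspecting the GPU at import time (this app uses `import spaces`, i.e. ZeroGPU, where no GPU is attached until a request is served) sees a consistent NVIDIA A10G. A minimal sketch of the effect; `fp16_supported` is a hypothetical helper added only for illustration:

import types
import torch

# Spoofed values copied from the diff: report an NVIDIA A10G (compute capability 8.6).
torch.cuda.get_device_capability = lambda *args, **kwargs: (8, 6)
torch.cuda.get_device_properties = lambda *args, **kwargs: types.SimpleNamespace(
    name='NVIDIA A10G', major=8, minor=6,
    total_memory=23836033024, multi_processor_count=80,
)

def fp16_supported() -> bool:
    # Hypothetical import-time check of the kind that would otherwise fail
    # (or mis-report) while CUDA is not yet initialised.
    major, _ = torch.cuda.get_device_capability()
    return major >= 7

print(torch.cuda.get_device_properties(0).name)  # NVIDIA A10G
print(fp16_supported())                          # True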
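Note: the download calls and the updated paths are meant to line up — `snapshot_download` fills `PASD/runs/...`, which the new `ckpt_path` points into, and the two `hf_hub_download` calls fill the personalized-model and RetinaFace locations. A small sketch that checks this layout before the heavy `from_pretrained` calls run; the check itself is an illustrative addition, not part of the commit:

import os

# Paths as used by the updated app.py; a missing file fails fast here with a
# clearer message than a failed from_pretrained() much later.
expected = [
    "PASD/runs/pasd/checkpoint-100000",
    "PASD/checkpoints/personalized_models/majicmixRealistic_v6.safetensors",
    "PASD/annotator/ckpts/RetinaFace-R50.pth",
]
missing = [p for p in expected if not os.path.exists(p)]
if missing:
    raise FileNotFoundError(f"Downloaded assets not found: {missing}")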
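Note: ordering in the new header matters — the snapshot has to be on disk and `./PASD` has to be on `sys.path` before the `models.pasd.*` imports further down the file, and before `from_pretrained(ckpt_path, ...)` can resolve the downloaded checkpoint. A condensed sketch of that dependency, reusing the names from the diff (it assumes the downloads above have already run):

import sys

# 1. The checkpoints were downloaded into ./PASD via huggingface_hub (see the diff).
# 2. Make the PASD repo importable before touching models.pasd.*.
sys.path.append('./PASD')
from models.pasd.unet_2d_condition import UNet2DConditionModel

# 3. Only now can the checkpoint directory created by snapshot_download be loaded.
ckpt_path = "PASD/runs/pasd/checkpoint-100000"
unet = UNet2DConditionModel.from_pretrained(ckpt_path, subfolder="unet")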