Update app.py
app.py CHANGED
@@ -1,5 +1,6 @@
 import os
 os.system('git clone https://github.com/tencent-ailab/IP-Adapter.git')
+os.system('wget https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter_sd15.bin')
 os.system('mv IP-Adapter IP_Adapter')
 os.system('ls IP_Adapter/ip_adapter')
 import gradio as gr
@@ -16,7 +17,7 @@ base_model_path = "stable-diffusion-v1-5/stable-diffusion-v1-5"
 vae_model_path = "stabilityai/sd-vae-ft-mse"
 image_encoder_repo="InvokeAI/ip_adapter_sd_image_encoder"
 image_encoder_path = "IP_Adapter/ip_adapter/models/image_encoder/"
-ip_ckpt = "
+ip_ckpt = "ip-adapter_sd15.bin"
 device = "cpu" # or "cuda" if using GPU
 
 # VAE and scheduler
@@ -46,7 +47,7 @@ def generate_variations(upload_img):
         vae=vae,
         feature_extractor=None,
         safety_checker=None,
-        torch_dtype=torch.float16
+        #torch_dtype=torch.float16
     )
     ip_model = IPAdapter(pipe, image_encoder_repo, ip_ckpt, device)
     images = ip_model.generate(pil_image=upload_img, num_samples=4, num_inference_steps=50, seed=42)
@@ -55,7 +56,7 @@ def generate_variations(upload_img):
 def generate_img2img(base_img, guide_img):
     pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
         base_model_path,
-        torch_dtype=torch.float16,
+        #torch_dtype=torch.float16,
         scheduler=noise_scheduler,
         vae=vae,
         feature_extractor=None,
@@ -68,7 +69,7 @@ def generate_img2img(base_img, guide_img):
 def generate_inpaint(input_img, masked_img, mask_img):
     pipe = StableDiffusionInpaintPipelineLegacy.from_pretrained(
         base_model_path,
-        torch_dtype=torch.float16,
+        #torch_dtype=torch.float16,
         scheduler=noise_scheduler,
         vae=vae,
         feature_extractor=None,
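In short, the commit does two things: it downloads the IP-Adapter SD 1.5 checkpoint at startup so that ip_ckpt = "ip-adapter_sd15.bin" resolves to a local file (the previous ip_ckpt assignment was left unterminated), and it comments out the torch_dtype=torch.float16 arguments because the Space runs on CPU. As an aside, the same checkpoint could be fetched through huggingface_hub instead of shelling out to wget; a minimal sketch, not what the commit does:

from huggingface_hub import hf_hub_download

# Fetches the same file as the wget call, but into the local HF cache
# (with resume and checksum handling) and returns its path.
ip_ckpt = hf_hub_download(repo_id="h94/IP-Adapter", filename="models/ip-adapter_sd15.bin")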
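On the torch_dtype change: most PyTorch CPU kernels that diffusers relies on do not support float16, so loading the pipelines in half precision on a CPU Space would fail at inference time; dropping the argument falls back to float32. A device-conditional variant, sketched here reusing names already defined in app.py (base_model_path, noise_scheduler, vae), would keep fp16 available when a GPU is present:

import torch
from diffusers import StableDiffusionImg2ImgPipeline

# Pick the dtype together with the device: fp16 halves memory and speeds up
# inference on CUDA, while CPU kernels need full float32 precision.
device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if device == "cuda" else torch.float32

pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    base_model_path,
    torch_dtype=torch_dtype,
    scheduler=noise_scheduler,
    vae=vae,
    feature_extractor=None,
).to(device)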
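One note on the unchanged setup lines: the mv IP-Adapter IP_Adapter rename is presumably needed because a hyphen is not valid in a Python identifier, so the cloned repo cannot be referenced in an import statement under its original directory name:

# "IP-Adapter" cannot appear in an import statement; "IP_Adapter" can.
# Hypothetical import path after the rename; the actual import used by
# app.py is not shown in this diff.
from IP_Adapter.ip_adapter import IPAdapter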