Update app.py
app.py
CHANGED
@@ -61,14 +61,29 @@ def align_embeddings(prompt_embeds, negative_prompt_embeds):
     return torch.nn.functional.pad(prompt_embeds, (0, 0, 0, max_length - prompt_embeds.shape[1])), \
            torch.nn.functional.pad(negative_prompt_embeds, (0, 0, 0, max_length - negative_prompt_embeds.shape[1]))
 
-def preprocess_image(image, target_width, target_height):  # Converts the image into a format suitable for the model.
-    if isinstance(image, np.ndarray):
-        image = Image.fromarray(image)
-    image = image.resize((target_width, target_height), Image.LANCZOS)
-    image = np.array(image).astype(np.float32) / 255.0  # Normalize to [0, 1]
-    image = image[None].transpose(0, 3, 1, 2)  # Convert to (batch, channels, height, width)
-    image = torch.from_numpy(image).to(device)
-    return image
+# def preprocess_image(image, target_width, target_height):  # Converts the image into a format suitable for the model.
+#     if isinstance(image, np.ndarray):
+#         image = Image.fromarray(image)
+#     image = image.resize((target_width, target_height), Image.LANCZOS)
+#     image = np.array(image).astype(np.float32) / 255.0  # Normalize to [0, 1]
+#     image = image[None].transpose(0, 3, 1, 2)  # Convert to (batch, channels, height, width)
+#     image = torch.from_numpy(image).to(device)
+#     return image
+
+def preprocess_image(image, target_width, target_height, resize_to_224=False):
+    if isinstance(image, np.ndarray):
+        image = Image.fromarray(image)
+
+    # If resize_to_224=True, resize to 224x224
+    if resize_to_224:
+        image = image.resize((224, 224), Image.LANCZOS)
+    else:
+        image = image.resize((target_width, target_height), Image.LANCZOS)
+
+    image = np.array(image).astype(np.float32) / 255.0  # Normalize to [0, 1]
+    image = image[None].transpose(0, 3, 1, 2)  # Convert to (batch, channels, height, width)
+    image = torch.from_numpy(image).to(device)
+    return image
 
 pipe_default = get_lora_sd_pipeline(lora_dir='./lora_man_animestyle', base_model_name_or_path=model_default, dtype=torch_dtype).to(device)
 # pipe_controlnet = StableDiffusionControlNetPipeline.from_pretrained(
@@ -115,8 +130,11 @@ def infer(
     pipe_ip_adapter.set_ip_adapter_scale(ip_adapter_strength)
 
     # Convert the images
-    ip_source_image = preprocess_image(ip_source_image, width, height)
-    ip_adapter_image = preprocess_image(ip_adapter_image, width, height)
+    #ip_source_image = preprocess_image(ip_source_image, width, height)
+    #ip_adapter_image = preprocess_image(ip_adapter_image, width, height)
+    # Convert the images for the IP-Adapter (size 224x224)
+    ip_source_image = preprocess_image(ip_source_image, width, height, resize_to_224=True)
+    ip_adapter_image = preprocess_image(ip_adapter_image, width, height, resize_to_224=True)
 
     # Get the image embeddings for the IP-Adapter
     #image_embeds = pipe_ip_adapter.get_image_embeds(ip_adapter_image)