Hugging Face Spaces — commit "Update app.py" for file app.py (Space status: Runtime error). The rendered diff of app.py follows.
@@ -84,15 +84,31 @@ def preprocess_image(image, target_width, target_height, resize_to_224=False):
|
|
84 |
image = torch.from_numpy(image).to(device)
|
85 |
return image
|
86 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
87 |
def get_depth_map(image, depth_estimator):
|
88 |
-
|
89 |
-
image
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
94 |
return depth_map
|
95 |
-
|
96 |
pipe_default = get_lora_sd_pipeline(lora_dir='lora_man_animestyle', base_model_name_or_path=model_default, dtype=torch_dtype).to(device)
|
97 |
|
98 |
# ----------------------------------------------------------------------------------------------------------------------------------------------------
|
@@ -424,9 +440,9 @@ def infer(
|
|
424 |
|
425 |
print('control_mode = ', control_mode)
|
426 |
|
427 |
-
# Преобразуем изображения
|
428 |
-
cn_source_image = preprocess_image(cn_source_image, width, height)
|
429 |
-
control_image = preprocess_image(control_image, width, height)
|
430 |
|
431 |
depth_estimator = pipeline("depth-estimation")
|
432 |
depth_map = get_depth_map(control_image, depth_estimator).unsqueeze(0).half().to(device)
|
|
|
84 |
image = torch.from_numpy(image).to(device)
|
85 |
return image
|
86 |
|
87 |
+
# def get_depth_map(image, depth_estimator):
|
88 |
+
# image = depth_estimator(image)["depth"]
|
89 |
+
# image = np.array(image)
|
90 |
+
# image = image[:, :, None]
|
91 |
+
# image = np.concatenate([image, image, image], axis=2)
|
92 |
+
# detected_map = torch.from_numpy(image).float() / 255.0
|
93 |
+
# depth_map = detected_map.permute(2, 0, 1)
|
94 |
+
# return depth_map
|
95 |
+
|
96 |
def get_depth_map(image, depth_estimator):
    """Run a depth-estimation pipeline on *image* and return the prediction
    as a float tensor of shape (3, H, W) with values scaled to [0, 1].

    Parameters
    ----------
    image : PIL.Image.Image | np.ndarray | torch.Tensor
        Input picture. Arrays/tensors are converted to PIL first.
        NOTE(review): the tensor branch assumes ``image.cpu().numpy()``
        yields an array layout/dtype that ``Image.fromarray`` accepts
        (HWC uint8) — confirm against callers.
    depth_estimator : callable
        A pipeline-like callable (e.g. ``transformers.pipeline(
        "depth-estimation")``) returning a mapping with a "depth" entry.

    Returns
    -------
    torch.Tensor
        CHW float tensor; the single-channel depth map replicated to 3
        channels and divided by 255.
    """
    # Convert raw arrays/tensors to a PIL image, which is what the
    # depth-estimation pipeline expects.
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)
    elif isinstance(image, torch.Tensor):
        image = Image.fromarray(image.cpu().numpy())

    # Single-channel prediction -> HWC with the channel replicated 3x.
    raw_depth = np.array(depth_estimator(image)["depth"])
    stacked = np.concatenate([raw_depth[:, :, None]] * 3, axis=2)

    # Normalize to [0, 1] and reorder axes HWC -> CHW.
    depth_map = torch.from_numpy(stacked).float() / 255.0
    return depth_map.permute(2, 0, 1)
|
111 |
+
|
112 |
pipe_default = get_lora_sd_pipeline(lora_dir='lora_man_animestyle', base_model_name_or_path=model_default, dtype=torch_dtype).to(device)
|
113 |
|
114 |
# ----------------------------------------------------------------------------------------------------------------------------------------------------
|
|
|
440 |
|
441 |
print('control_mode = ', control_mode)
|
442 |
|
443 |
+
# # Преобразуем изображения
|
444 |
+
# cn_source_image = preprocess_image(cn_source_image, width, height)
|
445 |
+
# control_image = preprocess_image(control_image, width, height)
|
446 |
|
447 |
depth_estimator = pipeline("depth-estimation")
|
448 |
depth_map = get_depth_map(control_image, depth_estimator).unsqueeze(0).half().to(device)
|