position padding logic
Files changed:
- .gitignore +4 -1
- Task1-2.zip +0 -3
- app.py +315 -117
- app-v1.py → app_old.py +0 -0
- output.png +0 -0
- pixel_border.py +0 -50
- slider.py +0 -24
- test.py +0 -43
- test_bbox.py +28 -0
- test_bbox_thres.py +45 -0
- test_bria.py +23 -0
- test_logic.py +308 -0
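
This commit replaces app.py's old border-color / crop-mode flow (check_border_colors, resize_and_crop_image, the crop_mode and input_type controls) with bounding-box placement: get_bounding_box_with_threshold finds the subject from the alpha channel, position_logic crops to it, resizes it, and picks an (x, y) position on a 1080x1080 canvas with a 125 px padding margin that is relaxed on sides where the subject is already cut off, and process_single_image pastes the result over the chosen background and writes a process_log.json. A minimal sketch of how the new pieces compose (assuming position_logic from the new app.py is in scope; "no_bg.png" is a placeholder for a background-removed RGBA image, not a file in this commit):

    from PIL import Image

    # position_logic returns an action log, the fitted image, and the (x, y)
    # offset at which it should sit on the 1080x1080 canvas.
    log, fitted, x, y = position_logic("no_bg.png")

    canvas = Image.new("RGBA", (1080, 1080), "WHITE")  # one of the bg_choice options in app.py
    canvas.paste(fitted, (x, y), fitted)               # use the alpha channel as the paste mask
    canvas.convert("RGB").save("placed.jpg", format="JPEG")
    print(log)                                         # crop / resize / position entries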
.gitignore CHANGED
@@ -1,4 +1,7 @@
 venv/
 temp_input/
 temp_output/
-processed_images.zip
+processed_images.zip
+test_input/
+test_output/
+bria_output/
Task1-2.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:98299bfa9112b782fa4d66e06930e89e54930a8cd937ffd59e578909af2dde2e
-size 625294
app.py CHANGED
@@ -2,68 +2,15 @@ import os
 import zipfile
 import shutil
 import time
-from PIL import Image
+from PIL import Image, ImageDraw
 import io
 from rembg import remove
 import gradio as gr
 from concurrent.futures import ThreadPoolExecutor
 from transformers import pipeline
-
-
-
-
-def check_border_colors(image_path, tolerance):
-    image = Image.open(image_path)
-    pixels = image.load()
-
-    width, height = image.size
-
-    left_border_color = pixels[0, 0]
-    right_border_color = pixels[width - 1, 0]
-
-    for y in range(height):
-        if not colors_within_tolerance(pixels[0, y], left_border_color, tolerance):
-            return False
-        if not colors_within_tolerance(pixels[width - 1, y], right_border_color, tolerance):
-            return False
-
-    return True
-
-def resize_and_crop_image(image_path, target_size=(1080, 1080), crop_mode='center'):
-    print(f"Resizing and cropping image: {image_path}")
-    with Image.open(image_path) as img:
-        width, height = img.size
-        print(f"Original image size: {width}x{height}")
-
-        scaling_factor = max(target_size[0] / width, target_size[1] / height)
-
-        new_size = (int(width * scaling_factor), int(height * scaling_factor))
-        resized_img = img.resize(new_size, Image.LANCZOS)
-        print(f"Resized image size: {new_size}")
-
-        if crop_mode == 'center':
-            left = (resized_img.width - target_size[0]) / 2
-            top = (resized_img.height - target_size[1]) / 2
-        elif crop_mode == 'top':
-            left = (resized_img.width - target_size[0]) / 2
-            top = 0
-        elif crop_mode == 'bottom':
-            left = (resized_img.width - target_size[0]) / 2
-            top = resized_img.height - target_size[1]
-        elif crop_mode == 'left':
-            left = 0
-            top = (resized_img.height - target_size[1]) / 2
-        elif crop_mode == 'right':
-            left = resized_img.width - target_size[0]
-            top = (resized_img.height - target_size[1]) / 2
-
-        right = left + target_size[0]
-        bottom = top + target_size[1]
-
-        cropped_img = resized_img.crop((left, top, right, bottom))
-        print(f"Cropped image size: {cropped_img.size}")
-
-        return cropped_img
+import numpy as np
+import json
+import os
 
 def remove_background_rembg(input_path):
     print(f"Removing background using rembg for image: {input_path}")
@@ -79,7 +26,266 @@ def remove_background_bria(input_path):
     pillow_image = pipe(input_path)
     return pillow_image
 
-def
+def get_bounding_box_with_threshold(image, threshold):
+    # Convert image to numpy array
+    img_array = np.array(image)
+
+    # Get alpha channel
+    alpha = img_array[:,:,3]
+
+    # Find rows and columns where alpha > threshold
+    rows = np.any(alpha > threshold, axis=1)
+    cols = np.any(alpha > threshold, axis=0)
+
+    # Find the bounding box
+    top, bottom = np.where(rows)[0][[0, -1]]
+    left, right = np.where(cols)[0][[0, -1]]
+
+    if left < right and top < bottom:
+        return (left, top, right, bottom)
+    else:
+        return None
+
+def position_logic(image_path, use_threshold=True):
+    image = Image.open(image_path)
+    image = image.convert("RGBA")
+
+    # Get the bounding box of the non-blank area with threshold
+    if use_threshold:
+        bbox = get_bounding_box_with_threshold(image, threshold=10)
+    else:
+        bbox = image.getbbox()
+    log = []
+
+    if bbox:
+        # Check 1 pixel around the image for non-transparent pixels
+        width, height = image.size
+        cropped_sides = []
+
+        # Define tolerance for transparency
+        tolerance = 10  # Adjust this value as needed
+
+        # Check top edge
+        if any(image.getpixel((x, 0))[3] > tolerance for x in range(width)):
+            cropped_sides.append("top")
+
+        # Check bottom edge
+        if any(image.getpixel((x, height-1))[3] > tolerance for x in range(width)):
+            cropped_sides.append("bottom")
+
+        # Check left edge
+        if any(image.getpixel((0, y))[3] > tolerance for y in range(height)):
+            cropped_sides.append("left")
+
+        # Check right edge
+        if any(image.getpixel((width-1, y))[3] > tolerance for y in range(height)):
+            cropped_sides.append("right")
+
+        if cropped_sides:
+            info_message = f"Info for {os.path.basename(image_path)}: The following sides of the image may contain cropped objects: {', '.join(cropped_sides)}"
+            print(info_message)
+            log.append({"info": info_message})
+        else:
+            info_message = f"Info for {os.path.basename(image_path)}: The image is not cropped."
+            print(info_message)
+            log.append({"info": info_message})
+
+        # Crop the image to the bounding box
+        image = image.crop(bbox)
+        log.append({"action": "crop", "bbox": [str(bbox[0]), str(bbox[1]), str(bbox[2]), str(bbox[3])]})
+
+        # Calculate the new size to expand the image
+        padding = 125
+        target_size = 1080
+        aspect_ratio = image.width / image.height
+
+        if len(cropped_sides) == 4:
+            # If the image is cropped on all sides, center crop it to fit the canvas
+            if aspect_ratio > 1:  # Landscape
+                new_height = target_size
+                new_width = int(new_height * aspect_ratio)
+                left = (new_width - target_size) // 2
+                image = image.resize((new_width, new_height), Image.LANCZOS)
+                image = image.crop((left, 0, left + target_size, target_size))
+            else:  # Portrait or square
+                new_width = target_size
+                new_height = int(new_width / aspect_ratio)
+                top = (new_height - target_size) // 2
+                image = image.resize((new_width, new_height), Image.LANCZOS)
+                image = image.crop((0, top, target_size, top + target_size))
+            log.append({"action": "center_crop_resize", "new_size": f"{target_size}x{target_size}"})
+            x, y = 0, 0
+        elif not cropped_sides:
+            # If the image is not cropped, expand it from center until it touches the padding
+            new_height = 1080 - 2 * padding  # Ensure it touches top and bottom padding
+            new_width = int(new_height * aspect_ratio)
+
+            if new_width > 1080 - 2 * padding:
+                # If width exceeds available space, adjust based on width
+                new_width = 1080 - 2 * padding
+                new_height = int(new_width / aspect_ratio)
+
+            # Resize the image
+            image = image.resize((new_width, new_height), Image.LANCZOS)
+            log.append({"action": "resize", "new_width": str(new_width), "new_height": str(new_height)})
+
+            x = (1080 - new_width) // 2
+            y = 1080 - new_height - padding
+        else:
+            # New logic for handling cropped top and left, or top and right
+            if set(cropped_sides) == {"top", "left"} or set(cropped_sides) == {"top", "right"}:
+                new_height = target_size - padding  # Ensure bottom padding
+                new_width = int(new_height * aspect_ratio)
+
+                # If new width exceeds canvas width, adjust based on width
+                if new_width > target_size:
+                    new_width = target_size
+                    new_height = int(new_width / aspect_ratio)
+
+                # Resize the image
+                image = image.resize((new_width, new_height), Image.LANCZOS)
+                log.append({"action": "resize", "new_width": str(new_width), "new_height": str(new_height)})
+
+                # Set position
+                if "left" in cropped_sides:
+                    x = 0
+                else:  # right in cropped_sides
+                    x = target_size - new_width
+                y = 0
+
+                # If the resized image is taller than the canvas minus padding, crop from the bottom
+                if new_height > target_size - padding:
+                    crop_bottom = new_height - (target_size - padding)
+                    image = image.crop((0, 0, new_width, new_height - crop_bottom))
+                    new_height = target_size - padding
+                    log.append({"action": "crop_vertical", "bottom_pixels_removed": str(crop_bottom)})
+
+                log.append({"action": "position", "x": str(x), "y": str(y)})
+            elif set(cropped_sides) == {"bottom", "left", "right"}:
+                # Expand the image from the center
+                new_width = target_size
+                new_height = int(new_width / aspect_ratio)
+
+                if new_height < target_size:
+                    new_height = target_size
+                    new_width = int(new_height * aspect_ratio)
+
+                image = image.resize((new_width, new_height), Image.LANCZOS)
+
+                # Crop to fit the canvas
+                left = (new_width - target_size) // 2
+                top = 0
+                image = image.crop((left, top, left + target_size, top + target_size))
+
+                log.append({"action": "expand_and_crop", "new_size": f"{target_size}x{target_size}"})
+                x, y = 0, 0
+            elif cropped_sides == ["top"]:
+                # New logic for handling only top-cropped images
+                if image.width > image.height:
+                    new_width = target_size
+                    new_height = int(target_size / aspect_ratio)
+                else:
+                    new_height = target_size - padding  # Ensure bottom padding
+                    new_width = int(new_height * aspect_ratio)
+
+                # Resize the image
+                image = image.resize((new_width, new_height), Image.LANCZOS)
+                log.append({"action": "resize", "new_width": str(new_width), "new_height": str(new_height)})
+
+                x = (1080 - new_width) // 2
+                y = 0  # Align to top
+
+                # Apply padding only to non-cropped sides
+                x = max(padding, min(x, 1080 - new_width - padding))
+            elif cropped_sides in [["right"], ["left"]]:
+                # New logic for handling only right-cropped or left-cropped images
+                if image.width > image.height:
+                    new_width = target_size - padding  # Ensure padding on non-cropped side
+                    new_height = int(new_width / aspect_ratio)
+                else:
+                    new_height = target_size - padding  # Ensure bottom padding
+                    new_width = int(new_height * aspect_ratio)
+
+                # Resize the image
+                image = image.resize((new_width, new_height), Image.LANCZOS)
+                log.append({"action": "resize", "new_width": str(new_width), "new_height": str(new_height)})
+
+                if cropped_sides == ["right"]:
+                    x = 1080 - new_width  # Align to right
+                else:  # cropped_sides == ["left"]
+                    x = 0  # Align to left without padding
+                y = 1080 - new_height - padding  # Respect bottom padding
+            elif set(cropped_sides) == {"left", "right"}:
+                # Logic for handling images cropped on both left and right sides
+                new_width = 1080  # Expand to full width of canvas
+
+                # Calculate the aspect ratio of the original image
+                aspect_ratio = image.width / image.height
+
+                # Calculate the new height while maintaining aspect ratio
+                new_height = int(new_width / aspect_ratio)
+
+                # Resize the image
+                image = image.resize((new_width, new_height), Image.LANCZOS)
+                log.append({"action": "resize", "new_width": str(new_width), "new_height": str(new_height)})
+
+                # Set horizontal position (always 0 as it spans full width)
+                x = 0
+
+                # Calculate vertical position to respect bottom padding
+                y = 1080 - new_height - padding
+
+                # If the resized image is taller than the canvas, crop from the top only
+                if new_height > 1080 - padding:
+                    crop_top = new_height - (1080 - padding)
+                    image = image.crop((0, crop_top, new_width, new_height))
+                    new_height = 1080 - padding
+                    y = 0
+                    log.append({"action": "crop_vertical", "top_pixels_removed": str(crop_top)})
+                else:
+                    # Align the image to the bottom with padding
+                    y = 1080 - new_height - padding
+
+                log.append({"action": "position", "x": str(x), "y": str(y)})
+            else:
+                # Use the original resizing logic for other partially cropped images
+                if image.width > image.height:
+                    new_width = target_size
+                    new_height = int(target_size / aspect_ratio)
+                else:
+                    new_height = target_size
+                    new_width = int(target_size * aspect_ratio)
+
+                # Resize the image
+                image = image.resize((new_width, new_height), Image.LANCZOS)
+                log.append({"action": "resize", "new_width": str(new_width), "new_height": str(new_height)})
+
+                # Center horizontally for all images
+                x = (1080 - new_width) // 2
+                y = 1080 - new_height - padding
+
+                # Adjust positions for cropped sides
+                if "top" in cropped_sides:
+                    y = 0
+                elif "bottom" in cropped_sides:
+                    y = 1080 - new_height
+                if "left" in cropped_sides:
+                    x = 0
+                elif "right" in cropped_sides:
+                    x = 1080 - new_width
+
+                # Apply padding only to non-cropped sides, but keep horizontal centering
+                if "left" not in cropped_sides and "right" not in cropped_sides:
+                    x = (1080 - new_width) // 2  # Always center horizontally
+                if "top" not in cropped_sides and "bottom" not in cropped_sides:
+                    y = max(padding, min(y, 1080 - new_height - padding))
+
+    return log, image, x, y
+
+def process_single_image(image_path, output_folder, bg_method, output_format, bg_choice, custom_color, watermark_path=None):
+    add_padding_line = False
+    padding = 125
+
     filename = os.path.basename(image_path)
     try:
         print(f"Processing image: {filename}")
@@ -91,55 +297,53 @@ def process_single_image(image_path, output_folder, crop_mode, bg_method, output
         temp_image_path = os.path.join(output_folder, f"temp_{filename}")
         image_with_no_bg.save(temp_image_path, format='PNG')
 
-
-            print(f"Border colors are the same for image: {filename}")
-            if bg_choice == 'transparent':
-                new_image = Image.new("RGBA", (1080, 1080), (255, 255, 255, 0))
-            else:
-                new_image = Image.new("RGBA", (1080, 1080), custom_color)
-
-            width, height = image_with_no_bg.size
-            scaling_factor = min(1080 / width, 1080 / height)
-            new_size = (int(width * scaling_factor), int(height * scaling_factor))
-            resized_img = image_with_no_bg.resize(new_size, Image.LANCZOS)
-            print(f"Resized image size: {new_size}")
-            new_image.paste(resized_img, ((1080 - resized_img.width) // 2, (1080 - resized_img.height) // 2))
-        else:
-            print(f"Border colors are different for image: {filename}")
-            new_image = resize_and_crop_image(temp_image_path, crop_mode=crop_mode)
+        log, new_image, x, y = position_logic(temp_image_path)
 
+        # Create a new 1080x1080 canvas with the appropriate background
         if bg_choice == 'white':
-
-            white_bg = Image.new("RGBA", new_image.size, "WHITE")
-            new_image = Image.alpha_composite(white_bg, new_image)
+            canvas = Image.new("RGBA", (1080, 1080), "WHITE")
         elif bg_choice == 'custom':
-
-
-
+            canvas = Image.new("RGBA", (1080, 1080), custom_color)
+        else:  # transparent
+            canvas = Image.new("RGBA", (1080, 1080), (0, 0, 0, 0))
+
+        # Paste the resized image onto the canvas
+        canvas.paste(new_image, (x, y), new_image)
+        log.append({"action": "paste", "position": [str(x), str(y)]})
+
+        # Add visible black line for padding when background is not transparent
+        if add_padding_line:
+            draw = ImageDraw.Draw(canvas)
+            draw.rectangle([padding, padding, 1080 - padding, 1080 - padding], outline="black", width=5)
+            log.append({"action": "add_padding_line"})
 
         images_paths = []
 
         output_ext = 'jpg' if output_format == 'JPG' else 'png'
         output_path_without_watermark = os.path.join(output_folder, f"without_watermark_{os.path.splitext(filename)[0]}.{output_ext}")
        if output_format == 'JPG':
-
+            canvas.convert('RGB').save(output_path_without_watermark, format='JPEG')
        else:
-
+            canvas.save(output_path_without_watermark, format='PNG')
        images_paths.append((output_path_without_watermark, image_path))
 
        if watermark_path:
            watermark = Image.open(watermark_path).convert("RGBA")
-
-
+            canvas_with_watermark = canvas.copy()
+            canvas_with_watermark.paste(watermark, (0, 0), watermark)
            output_path_with_watermark = os.path.join(output_folder, f"with_watermark_{os.path.splitext(filename)[0]}.{output_ext}")
            if output_format == 'JPG':
-
+                canvas_with_watermark.convert('RGB').save(output_path_with_watermark, format='JPEG')
            else:
-
+                canvas_with_watermark.save(output_path_with_watermark, format='PNG')
-            images_paths.append((output_path_with_watermark, image_path))
+            images_paths.append((output_path_with_watermark, image_path))
 
        os.remove(temp_image_path)
 
+        with open(os.path.join(output_folder, 'process_log.json'), 'w') as log_file:
+            json.dump(log, log_file, indent=4)
+            print("Log saved to", os.path.join(output_folder, 'process_log.json'))
+
        print(f"Processed image paths: {images_paths}")
        return images_paths
 
@@ -147,7 +351,7 @@ def process_single_image(image_path, output_folder, crop_mode, bg_method, output
        print(f"Error processing {filename}: {e}")
        return None
 
-def process_images(input_files,
+def process_images(input_files, bg_method='rembg', watermark_path=None, output_format='PNG', bg_choice='transparent', custom_color="#ffffff", num_workers=4, progress=gr.Progress()):
    start_time = time.time()
 
    output_folder = "temp_output"
@@ -159,6 +363,7 @@ def process_images(input_files, crop_mode='center', bg_method='rembg', watermark
    original_images = []
 
    if isinstance(input_files, str) and input_files.lower().endswith(('.zip', '.rar')):
+        # Handle zip file
        input_folder = "temp_input"
        if os.path.exists(input_folder):
            shutil.rmtree(input_folder)
@@ -172,15 +377,19 @@ def process_images(input_files, crop_mode='center', bg_method='rembg', watermark
        return [], None, 0
 
        image_files = [os.path.join(input_folder, f) for f in os.listdir(input_folder) if f.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.gif', '.webp'))]
+    elif isinstance(input_files, list):
+        # Handle multiple files
+        image_files = input_files
    else:
-
+        # Handle single file
+        image_files = [input_files]
 
    total_images = len(image_files)
    print(f"Total images to process: {total_images}")
 
    avg_processing_time = 0
    with ThreadPoolExecutor(max_workers=num_workers) as executor:
-        future_to_image = {executor.submit(process_single_image, image_path, output_folder,
+        future_to_image = {executor.submit(process_single_image, image_path, output_folder, bg_method, output_format, bg_choice, custom_color, watermark_path): image_path for image_path in image_files}
        for idx, future in enumerate(future_to_image):
            try:
                start_time_image = time.time()
@@ -217,21 +426,24 @@ def process_images(input_files, crop_mode='center', bg_method='rembg', watermark
 
    return original_images, processed_images, output_zip_path, processing_time
 
-def gradio_interface(
+def gradio_interface(input_files, bg_method, watermark, output_format, bg_choice, custom_color, num_workers):
    progress = gr.Progress()
    watermark_path = watermark.name if watermark else None
 
-
-
+    # Check input_files, is it single image, list image, or zip/rar
+    if isinstance(input_files, str) and input_files.lower().endswith(('.zip', '.rar')):
+        return process_images(input_files, bg_method, watermark_path, output_format, bg_choice, custom_color, num_workers, progress)
+    elif isinstance(input_files, list):
+        return process_images(input_files, bg_method, watermark_path, output_format, bg_choice, custom_color, num_workers, progress)
    else:
-        return process_images(input_files,
+        return process_images(input_files.name, bg_method, watermark_path, output_format, bg_choice, custom_color, num_workers, progress)
 
 def show_color_picker(bg_choice):
    if bg_choice == 'custom':
        return gr.update(visible=True)
    return gr.update(visible=False)
 
-def
+def update_compare(evt: gr.SelectData):
    if isinstance(evt.value, dict) and 'caption' in evt.value:
        input_path = evt.value['caption']
        output_path = evt.value['image']['path']
@@ -250,31 +462,20 @@ def update_slider(evt: gr.SelectData):
    print("No caption found in selection")
    return gr.update(value=None), gr.update(value=None), gr.update(value=None), gr.update(value=None)
 
-def process(
-    _, processed_images, zip_path, time_taken = gradio_interface(
+def process(input_files, bg_method, watermark, output_format, bg_choice, custom_color, num_workers):
+    _, processed_images, zip_path, time_taken = gradio_interface(input_files, bg_method, watermark, output_format, bg_choice, custom_color, num_workers)
    processed_images_with_captions = [(img, f"Input: {caption}") for img, caption in processed_images]
    return processed_images_with_captions, zip_path, f"{time_taken:.2f} seconds"
 
-def update_input_type(choice):
-    if choice == "zip_rar":
-        return gr.update(visible=True), gr.update(visible=False)
-    else:
-        return gr.update(visible=False), gr.update(visible=True)
-
 with gr.Blocks() as iface:
    gr.Markdown("# Image Background Removal and Resizing with Optional Watermark")
    gr.Markdown("Choose to upload multiple images or a ZIP/RAR file, select the crop mode, optionally upload a watermark image, and choose the output format.")
-
-    input_type = gr.Radio(choices=["multiple_images", "zip_rar"], label="Input Type", value="multiple_images")
 
    with gr.Row():
-
-
-
-        watermark = gr.File(label="Upload Watermark Image (Optional)", file_types=[".png"])
+        input_files = gr.File(label="Upload Image or ZIP/RAR file", file_types=[".zip", ".rar", "image"], interactive=True)
+        watermark = gr.File(label="Upload Watermark Image (Optional)", file_types=[".png"])
 
    with gr.Row():
-        crop_mode = gr.Radio(choices=["center", "top", "bottom", "left", "right"], label="Crop Mode", value="center")
        output_format = gr.Radio(choices=["PNG", "JPG"], label="Output Format", value="JPG")
        num_workers = gr.Slider(minimum=1, maximum=16, step=1, label="Number of Workers", value=5)
 
@@ -283,6 +484,8 @@ with gr.Blocks() as iface:
        bg_choice = gr.Radio(choices=["transparent", "white", "custom"], label="Background Choice", value="white")
        custom_color = gr.ColorPicker(label="Custom Background Color", value="#ffffff", visible=False)
 
+    process_button = gr.Button("Process Images")
+
    with gr.Row():
        gallery_processed = gr.Gallery(label="Processed Images")
    with gr.Row():
@@ -295,13 +498,8 @@ with gr.Blocks() as iface:
        output_zip = gr.File(label="Download Processed Images as ZIP")
        processing_time = gr.Textbox(label="Processing Time (seconds)")
 
-    input_type.change(update_input_type, inputs=input_type, outputs=[zip_file, multiple_images])
-
    bg_choice.change(show_color_picker, inputs=bg_choice, outputs=custom_color)
-
-
-    process_button.click(process, inputs=[input_type, multiple_images, crop_mode, bg_method, watermark, output_format, bg_choice, custom_color, num_workers], outputs=[gallery_processed, output_zip, processing_time])
-
-    gallery_processed.select(update_slider, outputs=[image_original, image_processed, original_ratio, processed_ratio])
+    process_button.click(process, inputs=[input_files, bg_method, watermark, output_format, bg_choice, custom_color, num_workers], outputs=[gallery_processed, output_zip, processing_time])
+    gallery_processed.select(update_compare, outputs=[image_original, image_processed, original_ratio, processed_ratio])
 
 iface.launch()
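For reference, the thresholded bounding box that app.py now uses is a vectorized scan of the alpha channel. A standalone sketch (assuming "sample.png" is a placeholder RGBA image with at least one pixel above the threshold):

    import numpy as np
    from PIL import Image

    alpha = np.array(Image.open("sample.png").convert("RGBA"))[:, :, 3]
    rows = np.any(alpha > 10, axis=1)  # rows containing a pixel above the alpha threshold
    cols = np.any(alpha > 10, axis=0)  # columns containing such a pixel
    top, bottom = np.where(rows)[0][[0, -1]]
    left, right = np.where(cols)[0][[0, -1]]
    print("bbox:", (left, top, right, bottom))  # same box get_bounding_box_with_threshold(image, 10) returns
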
app-v1.py → app_old.py RENAMED
File without changes
output.png DELETED
Binary file (175 kB)
pixel_border.py DELETED
@@ -1,50 +0,0 @@
-import os
-import shutil
-from PIL import Image
-
-def colors_within_tolerance(color1, color2, tolerance):
-    return all(abs(c1 - c2) <= tolerance for c1, c2 in zip(color1, color2))
-
-def check_border_colors(image_path, tolerance):
-    # Open the image
-    image = Image.open(image_path)
-    pixels = image.load()
-
-    width, height = image.size
-
-    # Get the color of the first pixel on the left and right borders
-    left_border_color = pixels[0, 0]
-    right_border_color = pixels[width - 1, 0]
-
-    # Check the left border
-    for y in range(height):
-        if not colors_within_tolerance(pixels[0, y], left_border_color, tolerance):
-            return False
-
-    # Check the right border
-    for y in range(height):
-        if not colors_within_tolerance(pixels[width - 1, y], right_border_color, tolerance):
-            return False
-
-    return True
-
-def process_images(input_folder, output_folder_same, output_folder_different, tolerance):
-    # Ensure output directories exist
-    os.makedirs(output_folder_same, exist_ok=True)
-    os.makedirs(output_folder_different, exist_ok=True)
-
-    for filename in os.listdir(input_folder):
-        if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.gif')):
-            image_path = os.path.join(input_folder, filename)
-            if check_border_colors(image_path, tolerance):
-                shutil.copy(image_path, os.path.join(output_folder_same, filename))
-            else:
-                shutil.copy(image_path, os.path.join(output_folder_different, filename))
-
-# Example usage
-input_folder = 'D:\Ardha\Kerja\AISensum\ROX\Input\Task 1'
-output_folder_same = 'D:\Ardha\Kerja\AISensum\ROX\Input\Task 1\warna_sama'
-output_folder_different = 'D:\Ardha\Kerja\AISensum\ROX\Input\Task 1\warna_berbeda'
-tolerance = 50  # Adjust the tolerance value as needed
-
-process_images(input_folder, output_folder_same, output_folder_different, tolerance)
slider.py DELETED
@@ -1,24 +0,0 @@
-import gradio as gr
-from gradio_imageslider import ImageSlider
-
-def compare_images(img1, img2):
-    if not img1 or not img2:
-        return None
-    return (img1, img2)
-
-with gr.Blocks() as demo:
-    with gr.Group():
-        with gr.Row():
-            image_input1 = gr.Image(label="Upload Image 1", type="pil")
-            image_input2 = gr.Image(label="Upload Image 2", type="pil")
-
-        slider = ImageSlider(label="Compare Images", type="pil", slider_color="pink", width=600)
-
-    def update_slider(img1, img2):
-        return gr.update(value=(img1, img2))
-
-    image_input1.change(update_slider, inputs=[image_input1, image_input2], outputs=slider)
-    image_input2.change(update_slider, inputs=[image_input1, image_input2], outputs=slider)
-
-if __name__ == "__main__":
-    demo.launch()
test.py DELETED
@@ -1,43 +0,0 @@
-from PIL import Image
-
-def remove_blank_zone(image_path, output_path):
-    image = Image.open(image_path)
-    image = image.convert("RGBA")
-
-    # Get the bounding box of the non-blank area
-    bbox = image.getbbox()
-
-    if bbox:
-        # Crop the image to the bounding box
-        image = image.crop(bbox)
-
-        # Calculate the new size to expand the image until one side touches the padding
-        padding = 125
-        target_size = 1080 - 2 * padding
-        aspect_ratio = image.width / image.height
-
-        if image.width > image.height:
-            new_width = target_size
-            new_height = int(target_size / aspect_ratio)
-        else:
-            new_height = target_size
-            new_width = int(target_size * aspect_ratio)
-
-        # Resize the image
-        image = image.resize((new_width, new_height), Image.LANCZOS)
-
-        # Create a new 1080x1080 canvas with a white background
-        canvas = Image.new("RGBA", (1080, 1080), (255, 255, 255, 255))
-
-        # Calculate the position to paste the resized image onto the canvas
-        x = (1080 - new_width) // 2
-        y = (1080 - new_height) // 2
-
-        # Paste the resized image onto the canvas
-        canvas.paste(image, (x, y), image)
-
-        # Save the final image
-        canvas.save(output_path)
-
-# Example usage
-remove_blank_zone("stample.png", "output.png")
test_bbox.py ADDED
@@ -0,0 +1,28 @@
+from PIL import Image, ImageDraw
+
+def draw_bounding_box(input_path, output_path):
+    # Open the image and convert to RGBA
+    image = Image.open(input_path).convert("RGBA")
+
+    # Get the bounding box of the non-blank area
+    bbox = image.getbbox()
+
+    if bbox:
+        # Create a copy of the image to draw on
+        draw_image = image.copy()
+        draw = ImageDraw.Draw(draw_image)
+
+        # Draw the bounding box
+        draw.rectangle(bbox, outline="red", width=3)
+
+        # Save the image with the bounding box
+        draw_image.save(output_path)
+        print(f"Processed image: Bounding box {bbox}")
+    else:
+        print("No non-blank area found in the image")
+
+# Example usage
+input_image = "bria_output/1000416735_02.png"
+output_image = "bbox_image.png"
+draw_bounding_box(input_image, output_image)
+
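Note that PIL's Image.getbbox() only excludes pixels that are zero in every band, so near-transparent fringe pixels left over from background removal can still stretch the box; test_bbox_thres.py below repeats the check with an explicit alpha threshold so such pixels are ignored.
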
test_bbox_thres.py ADDED
@@ -0,0 +1,45 @@
+from PIL import Image, ImageDraw
+
+def draw_bounding_box_with_threshold(input_path, output_path, threshold=0):
+    # Open the image and convert to RGBA
+    image = Image.open(input_path).convert("RGBA")
+
+    # Get the bounding box of the non-blank area with threshold
+    bbox = get_bounding_box_with_threshold(image, threshold)
+
+    if bbox:
+        # Create a copy of the image to draw on
+        draw_image = image.copy()
+        draw = ImageDraw.Draw(draw_image)
+
+        # Draw the bounding box
+        draw.rectangle(bbox, outline="red", width=3)
+
+        # Save the image with the bounding box
+        draw_image.save(output_path)
+        print(f"Processed image: Bounding box {bbox}")
+    else:
+        print("No non-blank area found in the image")
+
+def get_bounding_box_with_threshold(image, threshold):
+    width, height = image.size
+    left, top, right, bottom = width, height, 0, 0
+
+    for y in range(height):
+        for x in range(width):
+            r, g, b, a = image.getpixel((x, y))
+            if a > threshold:
+                left = min(left, x)
+                top = min(top, y)
+                right = max(right, x)
+                bottom = max(bottom, y)
+
+    if left < right and top < bottom:
+        return (left, top, right, bottom)
+    else:
+        return None
+
+# Example usage
+input_image = "bria_output/1000485417_03.png"
+output_image = "bbox_image_thres.png"
+draw_bounding_box_with_threshold(input_image, output_image, threshold=10)
test_bria.py ADDED
@@ -0,0 +1,23 @@
+import os
+from transformers import pipeline
+
+def remove_background_with_bria(input_folder, output_folder):
+    if not os.path.exists(output_folder):
+        os.makedirs(output_folder)
+
+    pipe = pipeline("image-segmentation", model="briaai/RMBG-1.4", trust_remote_code=True)
+
+    for filename in os.listdir(input_folder):
+        if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.bmp', '.webp')):
+            input_path = os.path.join(input_folder, filename)
+            output_path = os.path.join(output_folder, os.path.splitext(filename)[0] + '.png')
+
+            # Use pipeline to remove the background
+            pillow_image = pipe(input_path)  # applies mask on input and returns a pillow image
+            pillow_image.save(output_path, format="PNG")
+
+            # Print the filename of the processed image
+            print(f"Processed {filename}")
+
+# Example usage
+remove_background_with_bria("D:\Ardha\Kerja\AISensum\ROX\Task 1.2\Raw_PSD", "bria_output")
test_logic.py ADDED
@@ -0,0 +1,308 @@
+import os
+import json
+from PIL import Image, ImageDraw
+import concurrent.futures
+import numpy as np
+
+def remove_blank_zone(input_folder, output_folder):
+    if not os.path.exists(output_folder):
+        os.makedirs(output_folder)
+
+    log = []
+    try:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
+            futures = []
+            for filename in os.listdir(input_folder):
+                if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.bmp')):
+                    input_path = os.path.join(input_folder, filename)
+                    output_path = os.path.join(output_folder, filename)
+                    future = executor.submit(process_image, input_path, output_path)
+                    futures.append((filename, future))
+
+            for filename, future in futures:
+                image_log = future.result()
+                log.append({
+                    "image": filename,
+                    "actions": image_log
+                })
+
+        with open(os.path.join(output_folder, 'process_log.json'), 'w') as log_file:
+            json.dump(log, log_file, indent=4)
+            print("Log saved to", os.path.join(output_folder, 'process_log.json'))
+    except Exception as e:
+        print("Error:", e)
+
+def get_bounding_box_with_threshold(image, threshold):
+    # Convert image to numpy array
+    img_array = np.array(image)
+
+    # Get alpha channel
+    alpha = img_array[:,:,3]
+
+    # Find rows and columns where alpha > threshold
+    rows = np.any(alpha > threshold, axis=1)
+    cols = np.any(alpha > threshold, axis=0)
+
+    # Find the bounding box
+    top, bottom = np.where(rows)[0][[0, -1]]
+    left, right = np.where(cols)[0][[0, -1]]
+
+    if left < right and top < bottom:
+        return (left, top, right, bottom)
+    else:
+        return None
+
+def process_image(image_path, output_path, add_padding_line=False, use_threshold=True):
+    image = Image.open(image_path)
+    image = image.convert("RGBA")
+
+    # Get the bounding box of the non-blank area with threshold
+    if use_threshold:
+        bbox = get_bounding_box_with_threshold(image, threshold=10)
+    else:
+        bbox = image.getbbox()
+    log = []
+
+    if bbox:
+        # Check 1 pixel around the image for non-transparent pixels
+        width, height = image.size
+        cropped_sides = []
+
+        # Define tolerance for transparency
+        tolerance = 10  # Adjust this value as needed
+
+        # Check top edge
+        if any(image.getpixel((x, 0))[3] > tolerance for x in range(width)):
+            cropped_sides.append("top")
+
+        # Check bottom edge
+        if any(image.getpixel((x, height-1))[3] > tolerance for x in range(width)):
+            cropped_sides.append("bottom")
+
+        # Check left edge
+        if any(image.getpixel((0, y))[3] > tolerance for y in range(height)):
+            cropped_sides.append("left")
+
+        # Check right edge
+        if any(image.getpixel((width-1, y))[3] > tolerance for y in range(height)):
+            cropped_sides.append("right")
+
+        if cropped_sides:
+            info_message = f"Info for {os.path.basename(image_path)}: The following sides of the image may contain cropped objects: {', '.join(cropped_sides)}"
+            print(info_message)
+            log.append({"info": info_message})
+        else:
+            info_message = f"Info for {os.path.basename(image_path)}: The image is not cropped."
+            print(info_message)
+            log.append({"info": info_message})
+
+        # Crop the image to the bounding box
+        image = image.crop(bbox)
+        log.append({"action": "crop", "bbox": [str(bbox[0]), str(bbox[1]), str(bbox[2]), str(bbox[3])]})
+
+        # Calculate the new size to expand the image
+        padding = 125
+        target_size = 1080
+        aspect_ratio = image.width / image.height
+
+        if len(cropped_sides) == 4:
+            # If the image is cropped on all sides, center crop it to fit the canvas
+            if aspect_ratio > 1:  # Landscape
+                new_height = target_size
+                new_width = int(new_height * aspect_ratio)
+                left = (new_width - target_size) // 2
+                image = image.resize((new_width, new_height), Image.LANCZOS)
+                image = image.crop((left, 0, left + target_size, target_size))
+            else:  # Portrait or square
+                new_width = target_size
+                new_height = int(new_width / aspect_ratio)
+                top = (new_height - target_size) // 2
+                image = image.resize((new_width, new_height), Image.LANCZOS)
+                image = image.crop((0, top, target_size, top + target_size))
+            log.append({"action": "center_crop_resize", "new_size": f"{target_size}x{target_size}"})
+            x, y = 0, 0
+        elif not cropped_sides:
+            # If the image is not cropped, expand it from center until it touches the padding
+            new_height = 1080 - 2 * padding  # Ensure it touches top and bottom padding
+            new_width = int(new_height * aspect_ratio)
+
+            if new_width > 1080 - 2 * padding:
+                # If width exceeds available space, adjust based on width
+                new_width = 1080 - 2 * padding
+                new_height = int(new_width / aspect_ratio)
+
+            # Resize the image
+            image = image.resize((new_width, new_height), Image.LANCZOS)
+            log.append({"action": "resize", "new_width": str(new_width), "new_height": str(new_height)})
+
+            x = (1080 - new_width) // 2
+            y = 1080 - new_height - padding
+        else:
+            # New logic for handling cropped top and left, or top and right
+            if set(cropped_sides) == {"top", "left"} or set(cropped_sides) == {"top", "right"}:
+                new_height = target_size - padding  # Ensure bottom padding
+                new_width = int(new_height * aspect_ratio)
+
+                # If new width exceeds canvas width, adjust based on width
+                if new_width > target_size:
+                    new_width = target_size
+                    new_height = int(new_width / aspect_ratio)
+
+                # Resize the image
+                image = image.resize((new_width, new_height), Image.LANCZOS)
+                log.append({"action": "resize", "new_width": str(new_width), "new_height": str(new_height)})
+
+                # Set position
+                if "left" in cropped_sides:
+                    x = 0
+                else:  # right in cropped_sides
+                    x = target_size - new_width
+                y = 0
+
+                # If the resized image is taller than the canvas minus padding, crop from the bottom
+                if new_height > target_size - padding:
+                    crop_bottom = new_height - (target_size - padding)
+                    image = image.crop((0, 0, new_width, new_height - crop_bottom))
+                    new_height = target_size - padding
+                    log.append({"action": "crop_vertical", "bottom_pixels_removed": str(crop_bottom)})
+
+                log.append({"action": "position", "x": str(x), "y": str(y)})
+            elif set(cropped_sides) == {"bottom", "left", "right"}:
+                # Expand the image from the center
+                new_width = target_size
+                new_height = int(new_width / aspect_ratio)
+
+                if new_height < target_size:
+                    new_height = target_size
+                    new_width = int(new_height * aspect_ratio)
+
+                image = image.resize((new_width, new_height), Image.LANCZOS)
+
+                # Crop to fit the canvas
+                left = (new_width - target_size) // 2
+                top = 0
+                image = image.crop((left, top, left + target_size, top + target_size))
+
+                log.append({"action": "expand_and_crop", "new_size": f"{target_size}x{target_size}"})
+                x, y = 0, 0
+            elif cropped_sides == ["top"]:
+                # New logic for handling only top-cropped images
+                if image.width > image.height:
+                    new_width = target_size
+                    new_height = int(target_size / aspect_ratio)
+                else:
+                    new_height = target_size - padding  # Ensure bottom padding
+                    new_width = int(new_height * aspect_ratio)
+
+                # Resize the image
+                image = image.resize((new_width, new_height), Image.LANCZOS)
+                log.append({"action": "resize", "new_width": str(new_width), "new_height": str(new_height)})
+
+                x = (1080 - new_width) // 2
+                y = 0  # Align to top
+
+                # Apply padding only to non-cropped sides
+                x = max(padding, min(x, 1080 - new_width - padding))
+            elif cropped_sides in [["right"], ["left"]]:
+                # New logic for handling only right-cropped or left-cropped images
+                if image.width > image.height:
+                    new_width = target_size - padding  # Ensure padding on non-cropped side
+                    new_height = int(new_width / aspect_ratio)
+                else:
+                    new_height = target_size - padding  # Ensure bottom padding
+                    new_width = int(new_height * aspect_ratio)
+
+                # Resize the image
+                image = image.resize((new_width, new_height), Image.LANCZOS)
+                log.append({"action": "resize", "new_width": str(new_width), "new_height": str(new_height)})
+
+                if cropped_sides == ["right"]:
+                    x = 1080 - new_width  # Align to right
+                else:  # cropped_sides == ["left"]
+                    x = 0  # Align to left without padding
+                y = 1080 - new_height - padding  # Respect bottom padding
+            elif set(cropped_sides) == {"left", "right"}:
+                # Logic for handling images cropped on both left and right sides
+                new_width = 1080  # Expand to full width of canvas
+
+                # Calculate the aspect ratio of the original image
+                aspect_ratio = image.width / image.height
+
+                # Calculate the new height while maintaining aspect ratio
+                new_height = int(new_width / aspect_ratio)
+
+                # Resize the image
+                image = image.resize((new_width, new_height), Image.LANCZOS)
+                log.append({"action": "resize", "new_width": str(new_width), "new_height": str(new_height)})
+
+                # Set horizontal position (always 0 as it spans full width)
+                x = 0
+
+                # Calculate vertical position to respect bottom padding
+                y = 1080 - new_height - padding
+
+                # If the resized image is taller than the canvas, crop from the top only
+                if new_height > 1080 - padding:
+                    crop_top = new_height - (1080 - padding)
+                    image = image.crop((0, crop_top, new_width, new_height))
+                    new_height = 1080 - padding
+                    y = 0
+                    log.append({"action": "crop_vertical", "top_pixels_removed": str(crop_top)})
+                else:
+                    # Align the image to the bottom with padding
+                    y = 1080 - new_height - padding
+
+                log.append({"action": "position", "x": str(x), "y": str(y)})
+            else:
+                # Use the original resizing logic for other partially cropped images
+                if image.width > image.height:
+                    new_width = target_size
+                    new_height = int(target_size / aspect_ratio)
+                else:
+                    new_height = target_size
+                    new_width = int(target_size * aspect_ratio)
+
+                # Resize the image
+                image = image.resize((new_width, new_height), Image.LANCZOS)
+                log.append({"action": "resize", "new_width": str(new_width), "new_height": str(new_height)})
+
+                # Center horizontally for all images
+                x = (1080 - new_width) // 2
+                y = 1080 - new_height - padding
+
+                # Adjust positions for cropped sides
+                if "top" in cropped_sides:
+                    y = 0
+                elif "bottom" in cropped_sides:
+                    y = 1080 - new_height
+                if "left" in cropped_sides:
+                    x = 0
+                elif "right" in cropped_sides:
+                    x = 1080 - new_width
+
+                # Apply padding only to non-cropped sides, but keep horizontal centering
+                if "left" not in cropped_sides and "right" not in cropped_sides:
+                    x = (1080 - new_width) // 2  # Always center horizontally
+                if "top" not in cropped_sides and "bottom" not in cropped_sides:
+                    y = max(padding, min(y, 1080 - new_height - padding))
+        # Create a new 1080x1080 canvas with a white background
+        canvas = Image.new("RGBA", (1080, 1080), (255, 255, 255, 255))
+
+        # Paste the resized image onto the canvas
+        canvas.paste(image, (x, y), image)
+        log.append({"action": "paste", "position": [str(x), str(y)]})
+
+        # Add visible black line for padding (for all images)
+        if add_padding_line:
+            draw = ImageDraw.Draw(canvas)
+            draw.rectangle([padding, padding, 1080 - padding, 1080 - padding], outline="black", width=5)
+            log.append({"action": "add_padding_line"})
+
+        # Save the final image
+        canvas.save(output_path)
+        log.append({"action": "save", "output_path": output_path})
+
+    return log
+
+# Example usage
+remove_blank_zone("bria_output", "test_output")