import numpy as np
import cv2
from PIL import Image
import os


def convert_to_bw(image):
    """
    Converts a PIL image to black & white (grayscale),
    and then back to RGB to maintain compatibility with other processes.

    Parameters:
        image (PIL.Image): Input RGB image.

    Returns:
        PIL.Image: Black & white image in RGB format.
    """
    return image.convert("L").convert("RGB")
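
# Usage sketch for convert_to_bw (illustrative only; "photo.jpg" is a placeholder
# path, not a file shipped with this module):
#
#   bw_image = convert_to_bw(Image.open("photo.jpg"))
#   bw_image.save("photo_bw.jpg")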


def load_colorization_model():
    """
    Loads the pre-trained Caffe model for colorizing black & white images.

    Model files required:
        - colorization_deploy_v2.prototxt
        - colorization_release_v2.caffemodel
        - pts_in_hull.npy

    Returns:
        cv2.dnn_Net: Loaded and initialized OpenCV DNN colorization model.
    """
    # Paths to model architecture, weights, and cluster centers
    base_path = os.path.join(os.path.dirname(__file__), "models")
    # proto_file = "models/colorization_deploy_v2.prototxt"
    # model_file = "models/colorization_release_v2.caffemodel"
    # cluster_file = "models/pts_in_hull.npy"
    proto_file = os.path.join(base_path, "colorization_deploy_v2.prototxt")
    model_file = os.path.join(base_path, "colorization_release_v2.caffemodel")
    cluster_file = os.path.join(base_path, "pts_in_hull.npy")

    # Load the model using the OpenCV DNN module
    net = cv2.dnn.readNetFromCaffe(proto_file, model_file)
    pts = np.load(cluster_file)

    # Populate the 313 ab cluster centers as a 1x1 convolution kernel
    class8_ab = net.getLayerId("class8_ab")
    conv8_313_rh = net.getLayerId("conv8_313_rh")
    pts = pts.transpose().reshape(2, 313, 1, 1)
    net.getLayer(class8_ab).blobs = [pts.astype(np.float32)]
    net.getLayer(conv8_313_rh).blobs = [np.full([1, 313], 2.606, dtype=np.float32)]

    return net
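
# Usage sketch (illustrative only; assumes the three model files listed in the
# docstring already exist under the local models/ directory, as this module does
# not download them):
#
#   net = load_colorization_model()
#   # The network can be loaded once and reused for every image that follows.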


def colorize_bw_image(pil_img, net):
    """
    Colorizes a grayscale (black & white) image using a pre-trained DNN model.

    Parameters:
        pil_img (PIL.Image): Input grayscale image in RGB format.
        net (cv2.dnn_Net): Loaded OpenCV DNN colorization model.

    Returns:
        PIL.Image: Colorized image in RGB format.
    """
    # Convert the PIL image to a NumPy array (forcing three channels)
    img = np.array(pil_img.convert("RGB"))
    img_bgr = img[:, :, [2, 1, 0]]  # Convert RGB to BGR for OpenCV
    img_bgr = img_bgr.astype("float32") / 255.0

    # Convert to LAB color space and extract the L (lightness) channel
    img_lab = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2LAB)
    l_channel = img_lab[:, :, 0]

    # Resize the L channel to the model's 224x224 input size and mean-center it
    input_l = cv2.resize(l_channel, (224, 224))
    input_l -= 50

    # Run inference; the network predicts the ab color channels
    net.setInput(cv2.dnn.blobFromImage(input_l))
    ab_channels = net.forward()[0, :, :, :].transpose((1, 2, 0))  # shape: (56, 56, 2)

    # Resize the predicted ab channels back to the original image size
    ab_channels = cv2.resize(ab_channels, (img.shape[1], img.shape[0]))

    # Merge the original L channel with the predicted ab channels
    lab_output = np.concatenate((l_channel[:, :, np.newaxis], ab_channels), axis=2)

    # Convert LAB to BGR and clip values to the valid [0, 1] range
    bgr_out = cv2.cvtColor(lab_output, cv2.COLOR_LAB2BGR)
    bgr_out = np.clip(bgr_out, 0, 1)

    # Convert back to RGB, scale to 8-bit, and return as a PIL image
    final_rgb = (bgr_out[:, :, [2, 1, 0]] * 255).astype("uint8")
    return Image.fromarray(final_rgb)
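

# Minimal end-to-end sketch, assuming a local file named "example.jpg" exists and
# the model files are available under models/. The file names below are
# placeholders, not assets shipped with this module; the block only runs when the
# script is executed directly.
if __name__ == "__main__":
    net = load_colorization_model()
    original = Image.open("example.jpg")
    bw = convert_to_bw(original)            # simulate a black & white input
    colorized = colorize_bw_image(bw, net)  # predict color from the L channel
    colorized.save("example_colorized.jpg")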