Create app.py
app.py
ADDED
@@ -0,0 +1,261 @@
# app.py

import os
import gradio as gr
import torch
import torch.nn as nn
from PIL import Image
from torchvision.transforms import ToTensor
import numpy as np
from concurrent.futures import ThreadPoolExecutor
from skimage import exposure

# --- Model Definition ---
class DenoisingModel(nn.Module):
    def __init__(self):
        super(DenoisingModel, self).__init__()
        self.enc1 = nn.Sequential(
            nn.Conv2d(3, 64, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(64, 64, 3, padding=1),
            nn.ReLU()
        )
        self.pool1 = nn.MaxPool2d(2, 2)

        self.up1 = nn.ConvTranspose2d(64, 64, 2, stride=2)
        self.dec1 = nn.Sequential(
            nn.Conv2d(64, 64, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(64, 3, 3, padding=1)
        )

    def forward(self, x):
        e1 = self.enc1(x)
        p1 = self.pool1(e1)
        u1 = self.up1(p1)
        d1 = self.dec1(u1)
        return d1
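
# Shape check: the 3x3 convs (padding=1) preserve H and W, MaxPool2d(2, 2)
# halves them, and the stride-2 transposed conv doubles them back, so the
# output matches the input whenever H and W are even (true for the 256x256
# patches used below).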

# --- Denoising Patch Function ---
def denoise_patch(model, patch):
    transform = ToTensor()
    input_patch = transform(patch).unsqueeze(0)

    with torch.no_grad():
        output_patch = model(input_patch)

    denoised_patch = output_patch.squeeze(0).permute(1, 2, 0).numpy() * 255
    denoised_patch = np.clip(denoised_patch, 0, 255).astype(np.uint8)

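    # Highlight preservation: the raw network output can dull bright regions,
    # so very bright pixels (>240) are copied straight from the input and
    # moderately bright ones (221-240) are blended 70/30 toward the original.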
    original_patch = np.array(patch)
    very_bright_mask = original_patch > 240
    bright_mask = (original_patch > 220) & (original_patch <= 240)

    denoised_patch[very_bright_mask] = original_patch[very_bright_mask]

    blend_factor = 0.7
    denoised_patch[bright_mask] = (
        blend_factor * original_patch[bright_mask] +
        (1 - blend_factor) * denoised_patch[bright_mask]
    )

    return denoised_patch

# --- Main Denoise Image Function (dynamically uses all CPU cores) ---
def denoise_image(image: Image.Image, model_path: str, patch_size: int = 256, overlap: int = 32) -> Image.Image:
    # Size the thread pool to the available CPU cores
    num_threads = os.cpu_count()
    if num_threads is None:  # os.cpu_count() may return None
        num_threads = 2  # Conservative fallback
    print(f"Utilizing {num_threads} CPU cores for parallel processing.")

    # Load the model on CPU, crucial for the Hugging Face Spaces free tier
    model = DenoisingModel()
    checkpoint = torch.load(model_path, map_location=torch.device('cpu'))
    model.load_state_dict(checkpoint['model_state_dict'])
    model.eval()  # Set model to evaluation mode

    # Process image (convert to RGB, get dimensions)
    image = image.convert("RGB")
    width, height = image.size

    # Padding needed to reach a multiple of patch_size (zero when already aligned)
    pad_right = (patch_size - width % patch_size) % patch_size
    pad_bottom = (patch_size - height % patch_size) % patch_size

    padded_width = width + pad_right
    padded_height = height + pad_bottom

    # Create padded image with reflection padding
    padded_image = Image.new("RGB", (padded_width, padded_height))
    padded_image.paste(image, (0, 0))  # Paste original image

    # Fill borders with reflected content
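    # (Mirroring rather than zero-filling keeps border statistics close to the
    # real image content, so edge patches denoise without dark seam artifacts.)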
    if pad_right > 0:
        right_border = image.crop((width - pad_right, 0, width, height))
        padded_image.paste(right_border.transpose(Image.FLIP_LEFT_RIGHT), (width, 0))
    if pad_bottom > 0:
        bottom_border = image.crop((0, height - pad_bottom, width, height))
        padded_image.paste(bottom_border.transpose(Image.FLIP_TOP_BOTTOM), (0, height))
    if pad_right > 0 and pad_bottom > 0:
        corner = image.crop((width - pad_right, height - pad_bottom, width, height))
        padded_image.paste(corner.transpose(Image.FLIP_LEFT_RIGHT).transpose(Image.FLIP_TOP_BOTTOM),
                           (width, height))

    # Generate patches and their positions for processing
    patches = []
    positions = []
    for i in range(0, padded_height, patch_size - overlap):
        for j in range(0, padded_width, patch_size - overlap):
            # Clamp with min() so the last patches stay in bounds and full-size
            actual_i = min(i, padded_height - patch_size)
            actual_j = min(j, padded_width - patch_size)
            patch = padded_image.crop((actual_j, actual_i, actual_j + patch_size, actual_i + patch_size))
            patches.append(patch)
            positions.append((actual_i, actual_j))

    # Process patches in parallel using ThreadPoolExecutor
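    # (PyTorch releases the GIL inside its C++ kernels, so threads give real
    # CPU parallelism here even though this is Python.)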
    with ThreadPoolExecutor(max_workers=num_threads) as executor:
        denoised_patches = list(executor.map(lambda p: denoise_patch(model, p), patches))

    # Reconstruct the image from denoised patches using weighted blending
    denoised_image_np = np.zeros((padded_height, padded_width, 3), dtype=np.float32)
    weight_map = np.zeros((padded_height, padded_width), dtype=np.float32)

    for (i, j), denoised_patch in zip(positions, denoised_patches):
        patch_height, patch_width, _ = denoised_patch.shape
        patch_weights = np.ones((patch_height, patch_width), dtype=np.float32)

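        # Overlaps are cross-faded: this patch ramps 0->1 over its first
        # `overlap` rows/cols while the neighbouring patch ramps 1->0, and
        # weight_map records the totals so the final division renormalizes.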
        if i > 0:
            patch_weights[:overlap, :] *= np.linspace(0, 1, overlap)[:, np.newaxis]
        if j > 0:
            patch_weights[:, :overlap] *= np.linspace(0, 1, overlap)[np.newaxis, :]
        if i + patch_height < padded_height:  # Not the last row
            patch_weights[-overlap:, :] *= np.linspace(1, 0, overlap)[:, np.newaxis]
        if j + patch_width < padded_width:  # Not the last column
            patch_weights[:, -overlap:] *= np.linspace(1, 0, overlap)[np.newaxis, :]

        # Clip values before blending; gamma=1.0 is the identity, so this is a
        # hook for tone adjustment rather than an active correction
        denoised_patch_processed = exposure.adjust_gamma(np.clip(denoised_patch, 0, 255), gamma=1.0)

        denoised_image_np[i:i + patch_height, j:j + patch_width] += (
            denoised_patch_processed * patch_weights[:, :, np.newaxis]
        )
        weight_map[i:i + patch_height, j:j + patch_width] += patch_weights

    # Normalize by accumulated weights to get the final blended image
    mask = weight_map > 0
    denoised_image_np[mask] /= weight_map[mask, np.newaxis]

    # Crop back to original dimensions and finalize
    final_image_np = denoised_image_np[:height, :width]
    final_image_np = np.clip(final_image_np, 0, 255).astype(np.uint8)

    return Image.fromarray(final_image_np)

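# A minimal standalone sketch (hypothetical file names; assumes a checkpoint
# saved with a 'model_state_dict' key, as loaded above):
#   img = Image.open("noisy.png")
#   denoise_image(img, "models/denoiser.pth").save("denoised.png")
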
# --- Gradio Interface Setup ---

# Function to find available models in the 'models' directory
def get_available_models():
    model_dir = "models"
    if not os.path.exists(model_dir):
        print(f"Warning: '{model_dir}' directory not found. No models will be available.")
        return []

    # Filter for .pth or .pt files
    models = [f for f in os.listdir(model_dir) if f.endswith(".pth") or f.endswith(".pt")]
    if not models:
        print(f"Warning: No .pth or .pt model files found in '{model_dir}'.")
    return models

# The main Gradio function that orchestrates the denoising process
def gradio_interface(input_image: np.ndarray, model_name: str, progress=gr.Progress(track_tqdm=True)) -> Image.Image:
    if input_image is None:
        raise gr.Error("Please upload an image to denoise.")
    if not model_name:
        raise gr.Error("Please select a model from the dropdown.")

    # Convert the numpy image from Gradio to a PIL Image for processing
    pil_image = Image.fromarray(input_image)

    # Construct the full model path
    model_path = os.path.join("models", model_name)

    print(f"Starting denoising process with model: '{model_name}'")
    progress(0, desc=f"Loading model: {model_name}...")

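    # Note: denoise_image reloads the checkpoint on every request; this keeps
    # the app stateless at the cost of a little per-call latency.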
    # Call the core denoising function
    denoised_pil_image = denoise_image(pil_image, model_path)

    print("Denoising completed successfully.")
    progress(1, desc="Done!")

    return denoised_pil_image

# Get the initial list of models for the dropdown
available_models = get_available_models()

# Define the Gradio interface using gr.Blocks for a structured layout
with gr.Blocks(theme=gr.themes.Soft(), title="Image Denoiser") as demo:
    gr.Markdown(
        """
        # 🖼️ Universal Image Denoiser
        Upload an image and select a pre-trained model to effectively remove noise.
        """
    )
    with gr.Row():
        with gr.Column(scale=1):
            input_img = gr.Image(type="numpy", label="Input Image", value=None)  # Start empty

            # Dropdown for model selection
            model_dropdown = gr.Dropdown(
                choices=available_models,
                label="Select Denoising Model",
                value=available_models[0] if available_models else None,  # Pre-select first model if available
                info="Place your .pth or .pt model files in the 'models/' directory."
            )

            denoise_button = gr.Button("Denoise Image", variant="primary")
            gr.Markdown(
                """
                **Note:** Processing large images can take time. The app utilizes all available CPU cores for faster denoising.
                """
            )
        with gr.Column(scale=1):
            output_img = gr.Image(type="pil", label="Denoised Image")

    # Examples section to demonstrate usage
    if available_models and os.path.exists("examples"):
        example_images = []
        # Find example images (.png, .jpg, .jpeg)
        for fname in os.listdir("examples"):
            if fname.lower().endswith(('.png', '.jpg', '.jpeg')):
                example_images.append(os.path.join("examples", fname))

        # Create examples only if there are models AND example images
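        # (With cache_examples=True, Gradio runs gradio_interface once per
        # example at startup and stores the outputs, so clicking an example
        # later returns the cached result instantly.)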
        if example_images:
            gr.Examples(
                examples=[[img_path, available_models[0]] for img_path in example_images],
                inputs=[input_img, model_dropdown],
                outputs=output_img,
                fn=gradio_interface,
                cache_examples=True  # Speeds up example loading
            )
        else:
            gr.Markdown("*(No example images found in 'examples/' directory)*")
    else:
        gr.Markdown("*(No models or example images found to populate examples)*")

    # Connect the button click to the denoising function
    denoise_button.click(
        fn=gradio_interface,
        inputs=[input_img, model_dropdown],
        outputs=output_img
    )

if __name__ == "__main__":
    demo.launch()