KenjieDec commited on
Commit
da7e1d8
·
verified ·
1 Parent(s): 314a753

Added a new option + Fixed zoom

Browse files

- New option: Result Image Scale
- Fixed: In-site zoom not aligning with the input image, since the result can be at a different resolution than the input

Files changed (2) hide show
  1. app.py +21 -21
  2. requirements.txt +1 -0
app.py CHANGED
@@ -21,20 +21,19 @@ for model_path in ['fbcnn_gray.pth','fbcnn_color.pth']:
21
  r = requests.get(url, allow_redirects=True)
22
  open(model_path, 'wb').write(r.content)
23
 
24
- def inference(input_img, is_gray, input_quality, zoom, x_shift, y_shift):
25
 
26
  print("datetime:", datetime.datetime.utcnow())
27
  input_img_width, input_img_height = Image.fromarray(input_img).size
28
  print("img size:", (input_img_width, input_img_height))
29
-
30
- if (input_img_width > 1080) or (input_img_height > 1080):
31
- resize_ratio = min(1080/input_img_width, 1080/input_img_height)
32
- resized_input = Image.fromarray(input_img).resize(
33
- (int(input_img_width*resize_ratio) + (input_img_width*resize_ratio < 1),
34
- int(input_img_height*resize_ratio) + (input_img_height*resize_ratio < 1)),
35
- resample=Image.BICUBIC)
36
- input_img = np.array(resized_input)
37
- print("input image resized to:", resized_input.size)
38
 
39
  if is_gray:
40
  n_channels = 1
@@ -148,8 +147,8 @@ def zoom_image(zoom, x_shift, y_shift, input_img, output_img = None):
148
  y_offset = int((img_h - zoom_h) * y_shift)
149
 
150
  crop_box = (x_offset, y_offset, x_offset + zoom_w, y_offset + zoom_h)
151
- img = img.crop(crop_box).resize((img_w, img_h), Image.BILINEAR)
152
- out_img = out_img.crop(crop_box).resize((img_w, img_h), Image.BILINEAR)
153
 
154
  return (img, out_img)
155
 
@@ -161,6 +160,7 @@ with gr.Blocks() as demo:
161
  output_img = gr.Image(label="Result")
162
 
163
  is_gray = gr.Checkbox(label="Grayscale (Check this if your image is grayscale)")
 
164
  input_quality = gr.Slider(1, 100, step=1, label="Intensity (Higher = stronger JPEG artifact removal)")
165
  zoom = gr.Slider(10, 100, step=1, value=50, label="Zoom Percentage (0 = original size)")
166
  x_shift = gr.Slider(0, 100, step=1, label="Horizontal shift Percentage (Before/After)")
@@ -173,19 +173,19 @@ with gr.Blocks() as demo:
173
 
174
  run.click(
175
  inference,
176
- inputs=[input_img, is_gray, input_quality, zoom, x_shift, y_shift],
177
  outputs=[output_img, before_after]
178
  )
179
 
180
  gr.Examples([
181
- ["doraemon.jpg", False, 60, 58, 50, 50],
182
- ["tomandjerry.jpg", False, 60, 60, 57, 44],
183
- ["somepanda.jpg", True, 100, 70, 8, 24],
184
- ["cemetry.jpg", False, 70, 80, 76, 62],
185
- ["michelangelo_david.jpg", True, 30, 88, 53, 27],
186
- ["elon_musk.jpg", False, 45, 75, 33, 30],
187
- ["text.jpg", True, 70, 50, 11, 29]
188
- ], inputs=[input_img, is_gray, input_quality, zoom, x_shift, y_shift])
189
 
190
  zoom.release(zoom_image, inputs=[zoom, x_shift, y_shift, input_img], outputs=[before_after])
191
  x_shift.release(zoom_image, inputs=[zoom, x_shift, y_shift, input_img], outputs=[before_after])
 
21
  r = requests.get(url, allow_redirects=True)
22
  open(model_path, 'wb').write(r.content)
23
 
24
+ def inference(input_img, is_gray, res_percentage, input_quality, zoom, x_shift, y_shift):
25
 
26
  print("datetime:", datetime.datetime.utcnow())
27
  input_img_width, input_img_height = Image.fromarray(input_img).size
28
  print("img size:", (input_img_width, input_img_height))
29
+
30
+ resized_input = Image.fromarray(input_img).resize(
31
+ (
32
+ int(input_img_width * (res_percentage/100)),
33
+ int(input_img_height * (res_percentage/100))
34
+ ), resample = Image.BICUBIC)
35
+ input_img = np.array(resized_input)
36
+ print("input image resized to:", resized_input.size)
 
37
 
38
  if is_gray:
39
  n_channels = 1
 
147
  y_offset = int((img_h - zoom_h) * y_shift)
148
 
149
  crop_box = (x_offset, y_offset, x_offset + zoom_w, y_offset + zoom_h)
150
+ img = img.resize((img_w, img_h), Image.BILINEAR).crop(crop_box)
151
+ out_img = out_img.resize((img_w, img_h), Image.BILINEAR).crop(crop_box)
152
 
153
  return (img, out_img)
154
 
 
160
  output_img = gr.Image(label="Result")
161
 
162
  is_gray = gr.Checkbox(label="Grayscale (Check this if your image is grayscale)")
163
+ max_res = gr.Slider(1, 100, step=0.5, label="Output image resolution Percentage (Higher% = longer processing time)")
164
  input_quality = gr.Slider(1, 100, step=1, label="Intensity (Higher = stronger JPEG artifact removal)")
165
  zoom = gr.Slider(10, 100, step=1, value=50, label="Zoom Percentage (0 = original size)")
166
  x_shift = gr.Slider(0, 100, step=1, label="Horizontal shift Percentage (Before/After)")
 
173
 
174
  run.click(
175
  inference,
176
+ inputs=[input_img, is_gray, max_res, input_quality, zoom, x_shift, y_shift],
177
  outputs=[output_img, before_after]
178
  )
179
 
180
  gr.Examples([
181
+ ["doraemon.jpg", False, 100, 60, 58, 50, 50],
182
+ ["tomandjerry.jpg", False, 100, 60, 60, 57, 44],
183
+ ["somepanda.jpg", True, 100, 100, 70, 8, 24],
184
+ ["cemetry.jpg", False, 100, 70, 80, 76, 62],
185
+ ["michelangelo_david.jpg", True, 100, 30, 88, 53, 27],
186
+ ["elon_musk.jpg", False, 100, 45, 75, 33, 30],
187
+ ["text.jpg", True, 100, 70, 50, 11, 29]
188
+ ], inputs=[input_img, is_gray, max_res, input_quality, zoom, x_shift, y_shift])
189
 
190
  zoom.release(zoom_image, inputs=[zoom, x_shift, y_shift, input_img], outputs=[before_after])
191
  x_shift.release(zoom_image, inputs=[zoom, x_shift, y_shift, input_img], outputs=[before_after])
requirements.txt CHANGED
@@ -5,3 +5,4 @@ gradio
5
  jinja2
6
  matplotlib
7
  gradio_imageslider
 
 
5
  jinja2
6
  matplotlib
7
  gradio_imageslider
8
+ pydantic==2.10.6