soiz1 committed on
Commit 27cf744 · verified · 1 Parent(s): bb4506e

Update app.py

Files changed (1)
  1. app.py +82 -99
app.py CHANGED
@@ -1,5 +1,4 @@
  import cv2
- import gradio as gr
  import os
  from PIL import Image
  import numpy as np
@@ -7,147 +6,131 @@ import torch
  from torch.autograd import Variable
  from torchvision import transforms
  import torch.nn.functional as F
- import gdown
- import matplotlib.pyplot as plt
  import warnings
  warnings.filterwarnings("ignore")

- os.system("git clone https://github.com/xuebinqin/DIS")
- os.system("mv DIS/IS-Net/* .")
-
- # project imports
- from data_loader_cache import normalize, im_reader, im_preprocess
- from models import *
-
- #Helpers
  device = 'cuda' if torch.cuda.is_available() else 'cpu'

- # Download official weights
- if not os.path.exists("saved_models"):
-     os.mkdir("saved_models")
-     os.system("mv isnet.pth saved_models/")
-
  class GOSNormalize(object):
-     '''
-     Normalize the Image using torch.transforms
-     '''
      def __init__(self, mean=[0.485,0.456,0.406], std=[0.229,0.224,0.225]):
          self.mean = mean
          self.std = std

-     def __call__(self,image):
-         image = normalize(image,self.mean,self.std)
          return image

-
- transform = transforms.Compose([GOSNormalize([0.5,0.5,0.5],[1.0,1.0,1.0])])

  def load_image(im_path, hypar):
      im = im_reader(im_path)
      im, im_shp = im_preprocess(im, hypar["cache_size"])
-     im = torch.divide(im,255.0)
      shape = torch.from_numpy(np.array(im_shp))
-     return transform(im).unsqueeze(0), shape.unsqueeze(0) # make a batch of image, shape

-
- def build_model(hypar,device):
-     net = hypar["model"]#GOSNETINC(3,1)
-
-     # convert to half precision
      if(hypar["model_digit"]=="half"):
          net.half()
          for layer in net.modules():
              if isinstance(layer, nn.BatchNorm2d):
                  layer.float()
-
      net.to(device)
-
      if(hypar["restore_model"]!=""):
          net.load_state_dict(torch.load(hypar["model_path"]+"/"+hypar["restore_model"], map_location=device))
          net.to(device)
      net.eval()
      return net

-
- def predict(net, inputs_val, shapes_val, hypar, device):
-     '''
-     Given an Image, predict the mask
-     '''
      net.eval()
-
      if(hypar["model_digit"]=="full"):
          inputs_val = inputs_val.type(torch.FloatTensor)
      else:
          inputs_val = inputs_val.type(torch.HalfTensor)
-

-     inputs_val_v = Variable(inputs_val, requires_grad=False).to(device) # wrap inputs in Variable
-
-     ds_val = net(inputs_val_v)[0] # list of 6 results
-
-     pred_val = ds_val[0][0,:,:,:] # B x 1 x H x W # we want the first one which is the most accurate prediction

-     ## recover the prediction spatial size to the orignal image size
      pred_val = torch.squeeze(F.upsample(torch.unsqueeze(pred_val,0),(shapes_val[0][0],shapes_val[0][1]),mode='bilinear'))

      ma = torch.max(pred_val)
      mi = torch.min(pred_val)
-     pred_val = (pred_val-mi)/(ma-mi) # max = 1

      if device == 'cuda': torch.cuda.empty_cache()
-     return (pred_val.detach().cpu().numpy()*255).astype(np.uint8) # it is the mask we need
-
- # Set Parameters
- hypar = {} # paramters for inferencing
-
-
- hypar["model_path"] ="./saved_models" ## load trained weights from this path
- hypar["restore_model"] = "isnet.pth" ## name of the to-be-loaded weights
- hypar["interm_sup"] = False ## indicate if activate intermediate feature supervision
-
- ## choose floating point accuracy --
- hypar["model_digit"] = "full" ## indicates "half" or "full" accuracy of float number
- hypar["seed"] = 0
-
- hypar["cache_size"] = [1024, 1024] ## cached input spatial resolution, can be configured into different size
-
- ## data augmentation parameters ---
- hypar["input_size"] = [1024, 1024] ## mdoel input spatial size, usually use the same value hypar["cache_size"], which means we don't further resize the images
- hypar["crop_size"] = [1024, 1024] ## random crop size from the input, it is usually set as smaller than hypar["cache_size"], e.g., [920,920] for data augmentation
-
- hypar["model"] = ISNetDIS()
-
- # Build Model
  net = build_model(hypar, device)

- def inference(image):
-     image_path = image
-
-     image_tensor, orig_size = load_image(image_path, hypar)
-     mask = predict(net, image_tensor, orig_size, hypar, device)
-
-     pil_mask = Image.fromarray(mask).convert('L')
-     im_rgb = Image.open(image).convert("RGB")
-
-     im_rgba = im_rgb.copy()
-     im_rgba.putalpha(pil_mask)
-
-     return [im_rgba, pil_mask]
-
-
- title = "Highly Accurate Dichotomous Image Segmentation"
- description = "This is an unofficial demo for DIS, a model that can remove the background from a given image. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below.<br>GitHub: https://github.com/xuebinqin/DIS<br>Telegram bot: https://t.me/restoration_photo_bot<br>[![](https://img.shields.io/twitter/follow/DoEvent?label=@DoEvent&style=social)](https://twitter.com/DoEvent)"
- article = "<div><center><img src='https://visitor-badge.glitch.me/badge?page_id=max_skobeev_dis_cmp_public' alt='visitor badge'></center></div>"
-
- interface = gr.Interface(
-     fn=inference,
-     inputs=gr.Image(type='filepath'),
-     outputs=[gr.Image(type='filepath', format="png"), gr.Image(type='filepath', format="png")],
-     examples=[['robot.png'], ['ship.png']],
-     title=title,
-     description=description,
-     article=article,
-     flagging_mode="never",
-     cache_mode="lazy",
- ).queue(api_open=True).launch(show_error=True, show_api=True)
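The deleted block above also removes the repository setup and weight download, so the rewritten app.py shown below assumes the DIS IS-Net sources (data_loader_cache and models, which provide normalize, im_reader, im_preprocess, and ISNetDIS) plus the isnet.pth weights are already in place. A minimal setup sketch, mirroring the removed commands rather than anything added by this commit:

# Setup sketch: reproduces the environment the new app.py expects.
# These commands come from the lines removed above; adjust paths as needed.
import os

os.system("git clone https://github.com/xuebinqin/DIS")  # IS-Net sources
os.system("mv DIS/IS-Net/* .")                           # data_loader_cache, models, ...

os.makedirs("saved_models", exist_ok=True)               # hypar["model_path"]
os.system("mv isnet.pth saved_models/")                  # pretrained weights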
  import cv2
  import os
  from PIL import Image
  import numpy as np
  import torch
  from torch.autograd import Variable
  from torchvision import transforms
  import torch.nn.functional as F
+ from flask import Flask, request, jsonify, send_file
+ import io
+ from werkzeug.utils import secure_filename
  import warnings
  warnings.filterwarnings("ignore")

+ # Initialize the model and settings
  device = 'cuda' if torch.cuda.is_available() else 'cpu'

  class GOSNormalize(object):
      def __init__(self, mean=[0.485,0.456,0.406], std=[0.229,0.224,0.225]):
          self.mean = mean
          self.std = std

+     def __call__(self, image):
+         image = normalize(image, self.mean, self.std)
          return image

+ transform = transforms.Compose([GOSNormalize([0.5,0.5,0.5],[1.0,1.0,1.0])])

  def load_image(im_path, hypar):
      im = im_reader(im_path)
      im, im_shp = im_preprocess(im, hypar["cache_size"])
+     im = torch.divide(im, 255.0)
      shape = torch.from_numpy(np.array(im_shp))
+     return transform(im).unsqueeze(0), shape.unsqueeze(0)

+ def build_model(hypar, device):
+     net = hypar["model"]
      if(hypar["model_digit"]=="half"):
          net.half()
          for layer in net.modules():
              if isinstance(layer, nn.BatchNorm2d):
                  layer.float()
      net.to(device)
      if(hypar["restore_model"]!=""):
          net.load_state_dict(torch.load(hypar["model_path"]+"/"+hypar["restore_model"], map_location=device))
          net.to(device)
      net.eval()
      return net

+ def predict(net, inputs_val, shapes_val, hypar, device):
      net.eval()
      if(hypar["model_digit"]=="full"):
          inputs_val = inputs_val.type(torch.FloatTensor)
      else:
          inputs_val = inputs_val.type(torch.HalfTensor)

+     inputs_val_v = Variable(inputs_val, requires_grad=False).to(device)
+     ds_val = net(inputs_val_v)[0]
+     pred_val = ds_val[0][0,:,:,:]

      pred_val = torch.squeeze(F.upsample(torch.unsqueeze(pred_val,0),(shapes_val[0][0],shapes_val[0][1]),mode='bilinear'))

      ma = torch.max(pred_val)
      mi = torch.min(pred_val)
+     pred_val = (pred_val-mi)/(ma-mi)

      if device == 'cuda': torch.cuda.empty_cache()
+     return (pred_val.detach().cpu().numpy()*255).astype(np.uint8)
+
+ # Parameter settings
+ hypar = {
+     "model_path": "./saved_models",
+     "restore_model": "isnet.pth",
+     "interm_sup": False,
+     "model_digit": "full",
+     "seed": 0,
+     "cache_size": [1024, 1024],
+     "input_size": [1024, 1024],
+     "crop_size": [1024, 1024],
+     "model": ISNetDIS()
+ }
+
+ # Build the model
  net = build_model(hypar, device)

+ app = Flask(__name__)
+ app.config['UPLOAD_FOLDER'] = 'uploads'
+ os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)

+ @app.route('/api/remove-background', methods=['POST'])
+ def remove_background():
+     if 'file' not in request.files:
+         return jsonify({"error": "No file provided"}), 400
+
+     file = request.files['file']
+     if file.filename == '':
+         return jsonify({"error": "No selected file"}), 400
+
+     # Save the file
+     filename = secure_filename(file.filename)
+     filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
+     file.save(filepath)
+
+     try:
+         # Process the image
+         image_tensor, orig_size = load_image(filepath, hypar)
+         mask = predict(net, image_tensor, orig_size, hypar, device)
+
+         pil_mask = Image.fromarray(mask).convert('L')
+         im_rgb = Image.open(filepath).convert("RGB")
+         im_rgba = im_rgb.copy()
+         im_rgba.putalpha(pil_mask)
+
+         # Return the result as byte data
+         output_buffer = io.BytesIO()
+         im_rgba.save(output_buffer, format="PNG")
+         output_buffer.seek(0)
+
+         # Delete the temporary file
+         os.remove(filepath)
+
+         return send_file(
+             output_buffer,
+             mimetype='image/png',
+             as_attachment=True,
+             download_name='output.png'
+         )
+     except Exception as e:
+         return jsonify({"error": str(e)}), 500
+
+ @app.route('/api/health', methods=['GET'])
+ def health_check():
+     return jsonify({"status": "healthy"}), 200
+
+ if __name__ == '__main__':
+     app.run(host='0.0.0.0', port=5000, debug=True)
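With the Gradio interface replaced by Flask, the Space now exposes a small HTTP API. A minimal client sketch (the host and port match the app.run call above; input.png is a hypothetical local file):

# Client sketch for the new endpoints; host, port, and file names are assumptions.
import requests

# Health check
print(requests.get("http://localhost:5000/api/health").json())  # {'status': 'healthy'}

# Background removal: upload an image, save the returned RGBA PNG
with open("input.png", "rb") as f:
    resp = requests.post(
        "http://localhost:5000/api/remove-background",
        files={"file": ("input.png", f, "image/png")},
    )
resp.raise_for_status()
with open("output.png", "wb") as out:
    out.write(resp.content)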