Saad0KH committed (verified)
Commit 62072ec · Parent(s): 4579257

Update app.py

Files changed (1):
  app.py (+78 -53)
app.py CHANGED
@@ -1,5 +1,4 @@
  import os
- import requests
  from flask import Flask, request, jsonify, send_file
  from PIL import Image
  from io import BytesIO
@@ -33,70 +32,107 @@ from torchvision.transforms.functional import to_pil_image
 
  app = Flask(__name__)
 
- # Base paths for the models
  base_path = 'yisol/IDM-VTON'
+ example_path = os.path.join(os.path.dirname(__file__), 'example')
 
- # Load the models
  unet = UNet2DConditionModel.from_pretrained(
      base_path,
      subfolder="unet",
      torch_dtype=torch.float16,
      force_download=False
  )
+ unet.requires_grad_(False)
  tokenizer_one = AutoTokenizer.from_pretrained(
      base_path,
      subfolder="tokenizer",
+     revision=None,
      use_fast=False,
      force_download=False
  )
  tokenizer_two = AutoTokenizer.from_pretrained(
      base_path,
      subfolder="tokenizer_2",
+     revision=None,
      use_fast=False,
      force_download=False
  )
  noise_scheduler = DDPMScheduler.from_pretrained(base_path, subfolder="scheduler")
- text_encoder_one = CLIPTextModel.from_pretrained(base_path, subfolder="text_encoder", torch_dtype=torch.float16)
- text_encoder_two = CLIPTextModelWithProjection.from_pretrained(base_path, subfolder="text_encoder_2", torch_dtype=torch.float16)
- image_encoder = CLIPVisionModelWithProjection.from_pretrained(base_path, subfolder="image_encoder", torch_dtype=torch.float16)
- vae = AutoencoderKL.from_pretrained(base_path, subfolder="vae", torch_dtype=torch.float16)
- UNet_Encoder = UNet2DConditionModel_ref.from_pretrained(base_path, subfolder="unet_encoder", torch_dtype=torch.float16)
 
- parsing_model = Parsing(0)
- openpose_model = OpenPose(0)
+ text_encoder_one = CLIPTextModel.from_pretrained(
+     base_path,
+     subfolder="text_encoder",
+     torch_dtype=torch.float16,
+     force_download=False
+ )
+ text_encoder_two = CLIPTextModelWithProjection.from_pretrained(
+     base_path,
+     subfolder="text_encoder_2",
+     torch_dtype=torch.float16,
+     force_download=False
+ )
+ image_encoder = CLIPVisionModelWithProjection.from_pretrained(
+     base_path,
+     subfolder="image_encoder",
+     torch_dtype=torch.float16,
+     force_download=False
+ )
+ vae = AutoencoderKL.from_pretrained(
+     base_path,
+     subfolder="vae",
+     torch_dtype=torch.float16,
+     force_download=False
+ )
 
- # Set up the Tryon pipeline
- pipe = TryonPipeline.from_pretrained(
+ UNet_Encoder = UNet2DConditionModel_ref.from_pretrained(
      base_path,
- unet=unet,
- vae=vae,
- feature_extractor=CLIPImageProcessor(),
- text_encoder=text_encoder_one,
- text_encoder_2=text_encoder_two,
- tokenizer=tokenizer_one,
- tokenizer_2=tokenizer_two,
- scheduler=noise_scheduler,
- image_encoder=image_encoder,
+     subfolder="unet_encoder",
      torch_dtype=torch.float16,
      force_download=False
  )
- pipe.unet_encoder = UNet_Encoder
 
- # Image transforms
- tensor_transfrom = transforms.Compose([
-     transforms.ToTensor(),
-     transforms.Normalize([0.5], [0.5]),
- ])
+ parsing_model = Parsing(0)
+ openpose_model = OpenPose(0)
+
+ UNet_Encoder.requires_grad_(False)
+ image_encoder.requires_grad_(False)
+ vae.requires_grad_(False)
+ unet.requires_grad_(False)
+ text_encoder_one.requires_grad_(False)
+ text_encoder_two.requires_grad_(False)
+ tensor_transfrom = transforms.Compose(
+     [
+         transforms.ToTensor(),
+         transforms.Normalize([0.5], [0.5]),
+     ]
+ )
+
+ pipe = TryonPipeline.from_pretrained(
+     base_path,
+     unet=unet,
+     vae=vae,
+     feature_extractor=CLIPImageProcessor(),
+     text_encoder=text_encoder_one,
+     text_encoder_2=text_encoder_two,
+     tokenizer=tokenizer_one,
+     tokenizer_2=tokenizer_two,
+     scheduler=noise_scheduler,
+     image_encoder=image_encoder,
+     torch_dtype=torch.float16,
+     force_download=False
+ )
+ pipe.unet_encoder = UNet_Encoder
 
  def pil_to_binary_mask(pil_image, threshold=0):
      np_image = np.array(pil_image)
      grayscale_image = Image.fromarray(np_image).convert("L")
      binary_mask = np.array(grayscale_image) > threshold
      mask = np.zeros(binary_mask.shape, dtype=np.uint8)
-     mask[binary_mask] = 1
-     return Image.fromarray((mask * 255).astype(np.uint8))
-
-
+     for i in range(binary_mask.shape[0]):
+         for j in range(binary_mask.shape[1]):
+             if binary_mask[i, j]:
+                 mask[i, j] = 1
+     mask = (mask * 255).astype(np.uint8)
+     output_mask = Image.fromarray(mask)
+     return output_mask
 
  def get_image_from_url(url):
      try:
@@ -121,7 +157,8 @@ def encode_image_to_base64(img):
      try:
          buffered = BytesIO()
          img.save(buffered, format="PNG")
-         return base64.b64encode(buffered.getvalue()).decode("utf-8")
+         img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
+         return img_str
      except Exception as e:
          logging.error(f"Error encoding image: {e}")
          raise
@@ -236,16 +273,17 @@ def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denois
          human_img_orig.paste(out_img, (int(left), int(top)))
          return human_img_orig, mask_gray
      else:
-         return images[0], mask_gray, mask
+         return images[0], mask_gray
 
 
  @app.route('/tryon-v2', methods=['POST'])
  def tryon_v2():
 
-     data = request.json
+     data = request.json
      human_image_data = data['human_image']
      garment_image_data = data['garment_image']
 
+     # Process images (base64 or URL)
      human_image = process_image(human_image_data)
      garment_image = process_image(garment_image_data)
 
@@ -256,34 +294,21 @@ def tryon_v2():
      seed = int(data.get('seed', random.randint(0, 9999999)))
      categorie = data.get('categorie', 'upper_body')
 
+     # Check whether 'mask_image' is present in the request data
      mask_image = None
      if 'mask_image' in data:
          mask_image_data = data['mask_image']
          mask_image = process_image(mask_image_data)
-
+
      human_dict = {
          'background': human_image,
          'layers': [mask_image] if not use_auto_mask else None,
          'composite': None
      }
-
-     try:
-         # Use torch.no_grad() to save memory
-         with torch.no_grad():
-             output_image, mask_image, mask = start_tryon(human_dict, garment_image, description, use_auto_mask, use_auto_crop, denoise_steps, seed, categorie)
-
-         # Free GPU memory after processing
-         clear_gpu_memory()
-
-         return jsonify({
-             'image_id': save_image(output_image),
-             'mask_gray_id': save_image(mask_image),
-             'mask_id': save_image(mask)
-         })
-
-     except Exception as e:
-         logging.error(f"Error during tryon process: {e}")
-         return jsonify({'error': 'An error occurred during tryon process.'}), 500
+     output_image, mask_image = start_tryon(human_dict, garment_image, description, use_auto_mask, use_auto_crop, denoise_steps, seed, categorie)
+     return jsonify({
+         'image_id': save_image(output_image)
+     })
 
  def clear_gpu_memory():
      torch.cuda.empty_cache()
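For context, after this commit /tryon-v2 responds with a single image_id instead of the previous image_id/mask_gray_id/mask_id trio, and the try/except plus torch.no_grad() wrapper around start_tryon is gone. Below is a minimal client sketch against the updated endpoint; the server address, file names, and the client-side requests library are assumptions for illustration (the requests import removed at the top of the diff was server-side and unused):

import base64
import requests  # hypothetical client-side dependency; unrelated to the import removed from app.py

API_URL = "http://localhost:5000/tryon-v2"  # assumed address of the running Flask app

def to_b64(path):
    # The endpoint's process_image() accepts base64-encoded images (or URLs).
    with open(path, "rb") as f:
        return base64.b64encode(f.read()).decode("utf-8")

payload = {
    "human_image": to_b64("human.jpg"),      # required
    "garment_image": to_b64("garment.jpg"),  # required
    # "mask_image": to_b64("mask.png"),      # optional; used when auto-masking is off
    "seed": 42,                              # optional; defaults to a random integer
    "categorie": "upper_body",               # optional; defaults to 'upper_body'
}

resp = requests.post(API_URL, json=payload)
resp.raise_for_status()
print(resp.json())  # {'image_id': '...'}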