Lifeinhockey committed on
Commit
ccce1ea
·
verified ·
1 Parent(s): 1b3ed30

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -138
app.py CHANGED
@@ -121,37 +121,16 @@ def infer(
121
  progress=gr.Progress(track_tqdm=True)
122
  ):
123
 
124
-
125
  # Генерация изображений с Ip_Adapter ------------------------------------------------------------------------------------------------------------------
126
- if use_ip_adapter: #and ip_source_image is not None and ip_adapter_image is not None:
127
 
128
- # Режим pose_estimation ---------------------------------------------------------------------------------------------------------------------------
129
- # prompt = "A man runs through the park against the background of trees. The man's entire figure, face, arms and legs are visible. Anime style. The best quality."
130
- # negative_prompt = "Blurred details, low resolution, bad anatomy, no face visible, poor image of a man's face, poor quality, artifacts, black and white image."
131
-
132
- # seed = 4
133
- # width = 512
134
- # height = 512
135
- # num_inference_steps = 50
136
- # guidance_scale = 7.5
137
- # lora_scale = 0.7
138
-
139
- # strength_ip = 0.9 # Коэфф. зашумления IP_adapter
140
- # ip_adapter_strength = 0.2 # Сила влияния IP_adapter
141
- # controlnet_conditioning_scale = 0.99 # Сила влияния ControlNet
142
-
143
- # use_ip_adapter = True # Параметр для включения IP_adapter
144
- # ip_source_image = load_image("ControlNet_1.jpeg") # Исходное изображение IP_adapter
145
- # ip_adapter_image = load_image("Run.jpeg") # Контрольное изображение IP_adapter
146
-
147
- # #ip_adapter_mode = "pose_estimation" # Режим работы Ip_Adapter
148
-
149
  if ip_adapter_mode == "pose_estimation":
150
 
151
  print('ip_adapter_mode = ', ip_adapter_mode)
152
 
153
  # Инициализация ControlNet
154
- controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch_dtype)
 
155
 
156
  generator = torch.Generator(device).manual_seed(seed)
157
 
@@ -210,30 +189,10 @@ def infer(
210
  height=height,
211
  num_inference_steps=num_inference_steps,
212
  guidance_scale=guidance_scale,
213
- controlnet_conditioning_scale=controlnet_conditioning_scale,
214
  generator=generator,
215
  ).images[0]
216
  else:
217
- # Режим edge_detection ---------------------------------------------------------------------------------------------------------------------------
218
- # prompt = "The smiling man. His face and hands are visible. Anime style. The best quality."
219
- # negative_prompt = "Blurred details, low resolution, bad anatomy, no face visible, poor image of a man's face, poor quality, artifacts, black and white image."
220
-
221
- # seed = 9
222
- # width = 512
223
- # height = 512
224
- # num_inference_steps = 50
225
- # guidance_scale = 7.5
226
- # lora_scale = 0.7
227
-
228
- # strength_ip = 0.5 #0.9 # Коэфф. зашумления IP_adapter
229
- # ip_adapter_strength = 0.15 #0.1 # Сила влияния IP_adapter
230
- # controlnet_conditioning_scale = 0.6 # Сила влияния ControlNet
231
-
232
- # use_ip_adapter = True # Параметр для включения IP_adapter
233
- # ip_source_image = load_image("005_6.jpeg") # Исходное изображение IP_adapter
234
- # ip_adapter_image = load_image("edges.jpeg") # Контрольное изображение IP_adapter
235
-
236
- # #ip_adapter_mode = "edge_detection" # Режим работы Ip_Adapter
237
 
238
  if ip_adapter_mode == "edge_detection":
239
 
@@ -318,34 +277,15 @@ def infer(
318
  height=height,
319
  num_inference_steps=num_inference_steps,
320
  guidance_scale=guidance_scale,
321
- controlnet_conditioning_scale=controlnet_conditioning_scale,
322
  generator=generator,
323
  ).images[0]
324
  else:
325
- # Режим depth_map ---------------------------------------------------------------------------------------------------------------------------
326
- # prompt = "The smiling girl, best quality, high quality"
327
- # negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality" #"Blurred details, low resolution, bad anatomy, no face visible, poor image of a man's face, poor quality, artifacts, black and white image."
328
-
329
- # seed = 6
330
- # num_inference_steps = 50
331
- # guidance_scale = 7.5
332
- # lora_scale = 0.7
333
-
334
- # strength_ip = 0.9 # Коэфф. зашумления IP_adapter
335
- # ip_adapter_strength = 0.5 # Сила влияния IP_adapter
336
- # controlnet_conditioning_scale = 0.99 # Сила влияния ControlNet
337
-
338
- # use_ip_adapter = True # Параметр для включения IP_adapter
339
- # ip_adapter_image = load_image("032_3.jpeg")
340
- # depth_map = load_image("depth_map.jpeg")
341
-
342
- # #ip_adapter_mode = "depth_map" # Режим работы Ip_Adapter
343
 
344
  if ip_adapter_mode == "depth_map":
345
 
346
  print('ip_adapter_mode = ', ip_adapter_mode)
347
-
348
-
349
  # Инициализация ControlNet
350
  controlnet_model_path = "lllyasviel/control_v11f1p_sd15_depth"
351
  controlnet = ControlNetModel.from_pretrained(controlnet_model_path, torch_dtype=torch.float16)
@@ -368,39 +308,21 @@ def infer(
368
  num_inference_steps=num_inference_steps,
369
  strength=strength_ip, # Коэфф. зашумления, чем больше, тем больше меняется результирующее изображение относитенльно исходного
370
  guidance_scale=guidance_scale,
371
- controlnet_conditioning_scale=controlnet_conditioning_scale,
372
  generator=generator,
373
  ).images[0]
374
  else:
375
  # Генерация изображений с ControlNet ----------------------------------------------------------------------------------------------------------------
376
- if use_control_net: #and control_image is not None and cn_source_image is not None:
377
-
378
- # Режим pose_estimation ---------------------------------------------------------------------------------------------------------------------------
379
- # prompt = "A man runs through the park against the background of trees. The man's entire figure, face, arms and legs are visible. Anime style. The best quality."
380
- # negative_prompt = "Blurred details, low resolution, bad anatomy, no face visible, poor image of a man's face, poor quality, artifacts, black and white image."
381
 
382
- # seed = 444
383
- # width = 512
384
- # height = 512
385
- # num_inference_steps = 50
386
- # guidance_scale = 7.5
387
- # lora_scale = 0.7
388
-
389
- # strength_cn = 0.9 # Коэфф. зашумления ControlNet
390
- # control_strength = 0.92 # Сила влияния ControlNet
391
-
392
- # use_control_net = True # Параметр для включения ControlNet
393
- # cn_source_image = load_image("ControlNet_1.jpeg") # Исходное изображение ControlNet
394
- # control_image = load_image("Run.jpeg") # Контрольное изображение ControlNet
395
-
396
- # #control_mode = "pose_estimation" # Режим работы ControlNet
397
 
398
  if control_mode == "pose_estimation":
399
 
400
  print('control_mode = ', control_mode)
401
 
402
  # Инициализация ControlNet
403
- controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch_dtype)
 
404
 
405
  generator = torch.Generator(device).manual_seed(seed)
406
 
@@ -460,31 +382,13 @@ def infer(
460
  generator=generator
461
  ).images[0]
462
  else:
463
- # Режим edge_detection ---------------------------------------------------------------------------------------------------------------------------
464
- # prompt = "The smiling girl. Anime style. Best quality, high quality" # "the mona lisa"
465
- # negative_prompt = "Blurred details, low resolution, bad anatomy, no face visible, poor image of a man's face, poor quality, artifacts, black and white image."
466
-
467
- # seed = 8 # 1 8 12 14 18
468
- # width = 512
469
- # height = 512
470
- # num_inference_steps = 50
471
- # guidance_scale = 7.5
472
- # lora_scale = 0.7
473
-
474
- # strength_cn = 0.2 # Коэфф. зашумления ControlNet
475
- # control_strength = 0.8 # Сила влияния ControlNet
476
-
477
- # use_control_net = True # Параметр для включения ControlNet
478
- # cn_source_image = load_image("edges_w.jpeg") # Исходное изображение ControlNet
479
- # control_image = load_image("027_0_1.jpeg") # Контрольное изображение ControlNet
480
-
481
- # #control_mode = "edge_detection" # Режим работы ControlNet
482
 
483
  if control_mode == "edge_detection":
484
 
485
  print('control_mode = ', control_mode)
486
 
487
- controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16, use_safetensors=True)
 
488
 
489
  generator = torch.Generator(device).manual_seed(seed)
490
 
@@ -511,25 +415,6 @@ def infer(
511
  generator=generator
512
  ).images[0]
513
  else:
514
- # Режим depth_map ---------------------------------------------------------------------------------------------------------------------------
515
- # prompt = "lego batman and robin" #"Lego Harry Potter and Jean Granger" #"Harry Potter and Hagrid in the lego style" #"lego batman and robin"
516
- # negative_prompt = "Blurred details, low resolution, bad anatomy, no face visible, poor image of a man's face, poor quality, artifacts, black and white image."
517
-
518
- # seed = 8
519
- # width = 512
520
- # height = 512
521
- # num_inference_steps = 50
522
- # guidance_scale = 7.5
523
- # lora_scale = 0.7
524
-
525
- # strength_cn = 1.0 # Коэфф. зашумления ControlNet
526
- # control_strength = 0.0 # Сила влияния ControlNet
527
-
528
- # use_control_net = True # Параметр для включения ControlNet
529
- # cn_source_image = load_image("edges_w.jpeg") # Исходное изображение ControlNet
530
- # control_image = load_image("014_3.jpeg") # Контрольное изображение ControlNet
531
-
532
- # #control_mode = "depth_map" # Режим работы ControlNet
533
 
534
  if control_mode == "depth_map":
535
 
@@ -538,7 +423,8 @@ def infer(
538
  depth_estimator = pipeline("depth-estimation")
539
  depth_map = get_depth_map(control_image, depth_estimator).unsqueeze(0).half().to(device)
540
 
541
- controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11f1p_sd15_depth", torch_dtype=torch.float16, use_safetensors=True)
 
542
 
543
  generator = torch.Generator(device).manual_seed(seed)
544
 
@@ -566,18 +452,10 @@ def infer(
566
  ).images[0]
567
  else:
568
  # Генерация изображений с LORA без ControlNet и IP_Adapter ---------------------------------------------------------------------------------------------
569
- # prompt = "A young man in anime style. The image is characterized by high definition and resolution. Handsome, thoughtful man, attentive eyes. The man is depicted in the foreground, close-up or in the middle. High-quality images of the face, eyes, nose, lips, hands and clothes. The background and background are blurred and indistinct. The play of light and shadow is visible on the face and clothes."
570
- # negative_prompt = "Blurred details, low resolution, bad anatomy, no face visible, poor image of a man's face, poor quality, artifacts, black and white image."
571
-
572
- # seed = 5
573
- # width = 512
574
- # height = 512
575
- # num_inference_steps = 30
576
- # guidance_scale = 7.5
577
- # lora_scale = 0.7
578
 
579
  # Инициализация ControlNet
580
- controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch_dtype)
 
581
 
582
  generator = torch.Generator(device).manual_seed(seed)
583
 
 
121
  progress=gr.Progress(track_tqdm=True)
122
  ):
123
 
 
124
  # Генерация изображений с Ip_Adapter ------------------------------------------------------------------------------------------------------------------
125
  + if use_ip_adapter and ip_source_image is not None and ip_adapter_image is not None:
126
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
127
  if ip_adapter_mode == "pose_estimation":
128
 
129
  print('ip_adapter_mode = ', ip_adapter_mode)
130
 
131
  # Инициализация ControlNet
132
+ controlnet_model_path = "lllyasviel/sd-controlnet-openpose"
133
+ controlnet = ControlNetModel.from_pretrained(controlnet_model_path, torch_dtype=torch_dtype)
134
 
135
  generator = torch.Generator(device).manual_seed(seed)
136
 
 
189
  height=height,
190
  num_inference_steps=num_inference_steps,
191
  guidance_scale=guidance_scale,
192
+ controlnet_conditioning_scale=0.99, #controlnet_conditioning_scale,
193
  generator=generator,
194
  ).images[0]
195
  else:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
196
 
197
  if ip_adapter_mode == "edge_detection":
198
 
 
277
  height=height,
278
  num_inference_steps=num_inference_steps,
279
  guidance_scale=guidance_scale,
280
+ controlnet_conditioning_scale=0.6, #controlnet_conditioning_scale,
281
  generator=generator,
282
  ).images[0]
283
  else:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
284
 
285
  if ip_adapter_mode == "depth_map":
286
 
287
  print('ip_adapter_mode = ', ip_adapter_mode)
288
+
 
289
  # Инициализация ControlNet
290
  controlnet_model_path = "lllyasviel/control_v11f1p_sd15_depth"
291
  controlnet = ControlNetModel.from_pretrained(controlnet_model_path, torch_dtype=torch.float16)
 
308
  num_inference_steps=num_inference_steps,
309
  strength=strength_ip, # Коэфф. зашумления, чем больше, тем больше меняется результирующее изображение относитенльно исходного
310
  guidance_scale=guidance_scale,
311
+ controlnet_conditioning_scale=0.99, #controlnet_conditioning_scale,
312
  generator=generator,
313
  ).images[0]
314
  else:
315
  # Генерация изображений с ControlNet ----------------------------------------------------------------------------------------------------------------
 
 
 
 
 
316
 
317
  + if use_control_net and control_image is not None and cn_source_image is not None:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
318
 
319
  if control_mode == "pose_estimation":
320
 
321
  print('control_mode = ', control_mode)
322
 
323
  # Инициализация ControlNet
324
+ controlnet_model_path = "lllyasviel/sd-controlnet-openpose"
325
+ controlnet = ControlNetModel.from_pretrained(controlnet_model_path, torch_dtype=torch_dtype)
326
 
327
  generator = torch.Generator(device).manual_seed(seed)
328
 
 
382
  generator=generator
383
  ).images[0]
384
  else:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
385
 
386
  if control_mode == "edge_detection":
387
 
388
  print('control_mode = ', control_mode)
389
 
390
+ controlnet_model_path = "lllyasviel/sd-controlnet-canny"
391
+ controlnet = ControlNetModel.from_pretrained(controlnet_model_path, torch_dtype=torch.float16, use_safetensors=True)
392
 
393
  generator = torch.Generator(device).manual_seed(seed)
394
 
 
415
  generator=generator
416
  ).images[0]
417
  else:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
418
 
419
  if control_mode == "depth_map":
420
 
 
423
  depth_estimator = pipeline("depth-estimation")
424
  depth_map = get_depth_map(control_image, depth_estimator).unsqueeze(0).half().to(device)
425
 
426
+ controlnet_model_path = "lllyasviel/control_v11f1p_sd15_depth"
427
+ controlnet = ControlNetModel.from_pretrained(controlnet_model_path, torch_dtype=torch.float16, use_safetensors=True)
428
 
429
  generator = torch.Generator(device).manual_seed(seed)
430
 
 
452
  ).images[0]
453
  else:
454
  # Генерация изображений с LORA без ControlNet и IP_Adapter ---------------------------------------------------------------------------------------------
 
 
 
 
 
 
 
 
 
455
 
456
  # Инициализация ControlNet
457
+ controlnet_model_path = "lllyasviel/sd-controlnet-openpose"
458
+ controlnet = ControlNetModel.from_pretrained(controlnet_model_path, torch_dtype=torch_dtype)
459
 
460
  generator = torch.Generator(device).manual_seed(seed)
461