Lifeinhockey committed on
Commit
d2faec8
·
verified ·
1 Parent(s): c8c91af

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -7
app.py CHANGED
@@ -103,7 +103,7 @@ def infer(
103
  ):
104
  generator = torch.Generator(device).manual_seed(seed)
105
 
106
- # Генерация с Ip_Adapter
107
  if use_ip_adapter and ip_source_image is not None and ip_adapter_image is not None:
108
  #pipe_controlnet = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
109
  pipe_ip_adapter = StableDiffusionControlNetPipeline.from_pretrained(
@@ -166,7 +166,7 @@ def infer(
166
  generator=generator,
167
  ).images[0]
168
  else:
169
- # Генерация с ControlNet
170
  if use_control_net and control_image is not None and cn_source_image is not None:
171
  pipe_controlnet = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
172
  model_default,
@@ -224,7 +224,7 @@ def infer(
224
  generator=generator
225
  ).images[0]
226
  else:
227
- # Генерация без ControlNet и IP_adapter
228
  if model != model_default:
229
  pipe = StableDiffusionPipeline.from_pretrained(model, torch_dtype=torch_dtype).to(device)
230
  prompt_embeds = long_prompt_encoder(prompt, pipe.tokenizer, pipe.text_encoder)
@@ -249,7 +249,8 @@ def infer(
249
 
250
  image = pipe(**params).images[0]
251
 
252
- return image
 
253
 
254
  examples = [
255
  "A young man in anime style. The image is characterized by high definition and resolution. Handsome, thoughtful man, attentive eyes. The man is depicted in the foreground, close-up or in the middle. High-quality images of the face, eyes, nose, lips, hands and clothes. The background and background are blurred and indistinct. The play of light and shadow is visible on the face and clothes.",
@@ -272,6 +273,7 @@ available_models = [
272
  "CompVis/stable-diffusion-v1-4",
273
  ]
274
 
 
275
  with gr.Blocks(css=css) as demo:
276
  with gr.Column(elem_id="col-container"):
277
  gr.Markdown(" # Text-to-Image Gradio Template from V. Gorsky")
@@ -351,7 +353,7 @@ with gr.Blocks(css=css) as demo:
351
  value=512,
352
  )
353
 
354
- # ControlNet ---------------------------------------------------------------------------------
355
  with gr.Blocks():
356
  with gr.Row():
357
  use_control_net = gr.Checkbox(
@@ -397,7 +399,7 @@ with gr.Blocks(css=css) as demo:
397
  outputs=control_net_options
398
  )
399
 
400
- # IP_Adapter ---------------------------------------------------------------------------------
401
  with gr.Blocks():
402
  with gr.Row():
403
  use_ip_adapter = gr.Checkbox(
@@ -442,7 +444,7 @@ with gr.Blocks(css=css) as demo:
442
  inputs=use_ip_adapter,
443
  outputs=ip_adapter_options
444
  )
445
- # --------------------------------------------------------------------------------------
446
 
447
  gr.Examples(examples=examples, inputs=[prompt], label="Examples for prompt:")
448
  gr.Examples(examples=examples_negative, inputs=[negative_prompt], label="Examples for negative prompt:")
 
103
  ):
104
  generator = torch.Generator(device).manual_seed(seed)
105
 
106
+ # Генерация с Ip_Adapter ------------------------------------------------------------------------------------------------------------------
107
  if use_ip_adapter and ip_source_image is not None and ip_adapter_image is not None:
108
  #pipe_controlnet = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
109
  pipe_ip_adapter = StableDiffusionControlNetPipeline.from_pretrained(
 
166
  generator=generator,
167
  ).images[0]
168
  else:
169
+ # Генерация с ControlNet ----------------------------------------------------------------------------------------------------------------
170
  if use_control_net and control_image is not None and cn_source_image is not None:
171
  pipe_controlnet = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
172
  model_default,
 
224
  generator=generator
225
  ).images[0]
226
  else:
227
+ # Генерация без ControlNet и IP_adapter ---------------------------------------------------------------------------------------------
228
  if model != model_default:
229
  pipe = StableDiffusionPipeline.from_pretrained(model, torch_dtype=torch_dtype).to(device)
230
  prompt_embeds = long_prompt_encoder(prompt, pipe.tokenizer, pipe.text_encoder)
 
249
 
250
  image = pipe(**params).images[0]
251
 
252
+ return image
253
+ # ---------------------------------------------------------------------------------------------------------------------------------------------
254
 
255
  examples = [
256
  "A young man in anime style. The image is characterized by high definition and resolution. Handsome, thoughtful man, attentive eyes. The man is depicted in the foreground, close-up or in the middle. High-quality images of the face, eyes, nose, lips, hands and clothes. The background and background are blurred and indistinct. The play of light and shadow is visible on the face and clothes.",
 
273
  "CompVis/stable-diffusion-v1-4",
274
  ]
275
 
276
+ # -------------------------------------------------------------------------------------------------------------------------------------------------
277
  with gr.Blocks(css=css) as demo:
278
  with gr.Column(elem_id="col-container"):
279
  gr.Markdown(" # Text-to-Image Gradio Template from V. Gorsky")
 
353
  value=512,
354
  )
355
 
356
+ # ControlNet -----------------------------------------------------------------------------------------------
357
  with gr.Blocks():
358
  with gr.Row():
359
  use_control_net = gr.Checkbox(
 
399
  outputs=control_net_options
400
  )
401
 
402
+ # IP_Adapter ------------------------------------------------------------------------------------------------
403
  with gr.Blocks():
404
  with gr.Row():
405
  use_ip_adapter = gr.Checkbox(
 
444
  inputs=use_ip_adapter,
445
  outputs=ip_adapter_options
446
  )
447
+ # ---------------------------------------------------------------------------------------------------------
448
 
449
  gr.Examples(examples=examples, inputs=[prompt], label="Examples for prompt:")
450
  gr.Examples(examples=examples_negative, inputs=[negative_prompt], label="Examples for negative prompt:")