yonishafir committed · Commit 59ed86e · verified · 1 Parent(s): 5ced797

Update app.py

Files changed (1):
    app.py +6 -39

app.py CHANGED
@@ -162,11 +162,11 @@ app.prepare(ctx_id=0, det_size=(640, 640))
 
 
 # download checkpoints
-hf_hub_download(repo_id="briaai/ID_preservation_2.3", filename="controlnet/config.json", local_dir="./checkpoints")
-hf_hub_download(repo_id="briaai/ID_preservation_2.3", filename="controlnet/diffusion_pytorch_model.safetensors", local_dir="./checkpoints")
-hf_hub_download(repo_id="briaai/ID_preservation_2.3", filename="ip-adapter.bin", local_dir="./checkpoints")
-hf_hub_download(repo_id="briaai/ID_preservation_2.3", filename="image_encoder/pytorch_model.bin", local_dir="./checkpoints")
-hf_hub_download(repo_id="briaai/ID_preservation_2.3", filename="image_encoder/config.json", local_dir="./checkpoints")
+hf_hub_download(repo_id="briaai/ID_preservation_2.3_auraFaceEnc", filename="checkpoint_105000/controlnet/config.json", local_dir="./checkpoints")
+hf_hub_download(repo_id="briaai/ID_preservation_2.3_auraFaceEnc", filename="checkpoint_105000/controlnet/diffusion_pytorch_model.safetensors", local_dir="./checkpoints")
+hf_hub_download(repo_id="briaai/ID_preservation_2.3_auraFaceEnc", filename="checkpoint_105000/ip-adapter.bin", local_dir="./checkpoints")
+hf_hub_download(repo_id="briaai/ID_preservation_2.3_auraFaceEnc", filename="image_encoder/pytorch_model.bin", local_dir="./checkpoints")
+hf_hub_download(repo_id="briaai/ID_preservation_2.3_auraFaceEnc", filename="image_encoder/config.json", local_dir="./checkpoints")
 
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -265,30 +265,6 @@ def generate_image(image_path, prompt, num_steps, guidance_scale, seed, num_images
 
     generator = torch.Generator(device=device).manual_seed(seed)
 
-    # if lora_name != CURRENT_LORA_NAME: # Check if LoRA needs to be changed
-    #     if CURRENT_LORA_NAME is not None: # If a LoRA is already loaded, unload it
-    #         pipe.disable_lora()
-    #         pipe.unfuse_lora()
-    #         pipe.unload_lora_weights()
-    #         print(f"Unloaded LoRA: {CURRENT_LORA_NAME}")
-
-    #     if lora_name != "": # Load the new LoRA if specified
-    #         # pipe.enable_model_cpu_offload()
-    #         lora_path = os.path.join(lora_base_path, lora_name, "pytorch_lora_weights.safetensors")
-    #         pipe.load_lora_weights(lora_path)
-    #         pipe.fuse_lora(lora_scale)
-    #         pipe.enable_lora()
-
-    #         # lora_prefix = Loras_dict[lora_name]
-
-    #         print(f"Loaded new LoRA: {lora_name}")
-
-    #         # Update the current LoRA name
-    #         CURRENT_LORA_NAME = lora_name
-
-    # if lora_name != "":
-    #     full_prompt = f"{Loras_dict[lora_name]} + " " + {prompt}"
-    # else:
     full_prompt = prompt
 
     print("Start inference...")
@@ -353,8 +329,6 @@ with gr.Blocks(css=css) as demo:
                 info="Describe what you want to generate or modify in the image."
             )
 
-            # lora_name = gr.Dropdown(choices=lora_names, label="LoRA", value="", info="Select a LoRA name from the list, not selecting any will disable LoRA.")
-
            submit = gr.Button("Submit", variant="primary")
 
            with gr.Accordion(open=False, label="Advanced Options"):
@@ -400,13 +374,7 @@ with gr.Blocks(css=css) as demo:
                    step=0.01,
                    value=0.4,
                )
-                # lora_scale = gr.Slider(
-                #     label="lora_scale",
-                #     minimum=0.0,
-                #     maximum=1.0,
-                #     step=0.01,
-                #     value=0.7,
-                # )
+
                seed = gr.Slider(
                    label="Seed",
                    minimum=0,
@@ -427,7 +395,6 @@ with gr.Blocks(css=css) as demo:
        api_name=False,
    ).then(
        fn=generate_image,
-        # inputs=[img_file, prompt, num_steps, guidance_scale, seed, num_images, ip_adapter_scale, kps_scale, canny_scale, lora_name, lora_scale],
        inputs=[img_file, prompt, num_steps, guidance_scale, seed, num_images, ip_adapter_scale, kps_scale, canny_scale],
        outputs=[gallery]
    )
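
Note on the checkpoint hunk: this commit changes more than the repo name. Because hf_hub_download with local_dir preserves the repo-relative filename beneath that directory, the controlnet and IP-adapter files now land under ./checkpoints/checkpoint_105000/... rather than directly under ./checkpoints/. A minimal sketch of the resulting layout, assuming only the repo id and filenames visible in the diff (the print is illustrative, not part of app.py):

    from huggingface_hub import hf_hub_download

    # Same call pattern as the updated app.py lines; with local_dir set,
    # the repo-relative filename is kept as the on-disk subpath.
    config_path = hf_hub_download(
        repo_id="briaai/ID_preservation_2.3_auraFaceEnc",
        filename="checkpoint_105000/controlnet/config.json",
        local_dir="./checkpoints",
    )

    # hf_hub_download returns the resolved local path:
    # ./checkpoints/checkpoint_105000/controlnet/config.json
    print(config_path)

Consequently, any later code that loaded the ControlNet or IP-adapter from ./checkpoints/controlnet or ./checkpoints/ip-adapter.bin would need to point at the checkpoint_105000/ subdirectory, while the image-encoder files keep their previous location since those filenames are unchanged.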
 