yonishafir committed on
Commit cae15fc · 1 Parent(s): 9e516ca
Files changed (1)
  1. app.py +19 -4
app.py CHANGED
@@ -166,9 +166,24 @@ default_negative_prompt = "Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly
 app = FaceAnalysis(name='antelopev2', root='./', providers=['CPUExecutionProvider'])
 app.prepare(ctx_id=0, det_size=(640, 640))
 
-base_dir = "./instantID_ckpt/checkpoint_174000"
-face_adapter = f'{base_dir}/pytorch_model.bin'
-controlnet_path = f'{base_dir}/controlnet'
+
+# download checkpoints
+from huggingface_hub import hf_hub_download
+
+hf_hub_download(repo_id="briaai/ID_preservation_2.3", filename="controlnet/config.json", local_dir="./checkpoints")
+hf_hub_download(repo_id="briaai/ID_preservation_2.3", filename="controlnet/diffusion_pytorch_model.safetensors", local_dir="./checkpoints")
+hf_hub_download(repo_id="briaai/ID_preservation_2.3", filename="ip-adapter.bin", local_dir="./checkpoints")
+hf_hub_download(repo_id="briaai/ID_preservation_2.3", filename="image_encoder", local_dir="./checkpoints")
+
+
+
+# base_dir = "./instantID_ckpt/checkpoint_174000"
+# face_adapter = f'{base_dir}/pytorch_model.bin'
+# controlnet_path = f'{base_dir}/controlnet'
+face_adapter = f"./checkpoints/ip-adapter.bin"
+controlnet_path = f"./checkpoints/controlnet"
+
+
 base_model_path = f'briaai/BRIA-2.3'
 resolution = 1024
 
@@ -182,7 +197,7 @@ controlnet = [controlnet_lnmks, controlnet_canny]
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 image_encoder = CLIPVisionModelWithProjection.from_pretrained(
-    '/home/ubuntu/BRIA-2.3-InstantID/ip_adapter/image_encoder',
+    f"./checkpoints//image_encoder",
     torch_dtype=torch.float16,
 )
 
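
The commit replaces hard-coded local checkpoint paths with per-file downloads from the briaai/ID_preservation_2.3 Hub repo into ./checkpoints. Each hf_hub_download call fetches one file; assuming the repo keeps the ControlNet and image-encoder weights under controlnet/ and image_encoder/ subfolders (inferred from the filenames above, not verified against the repo), a minimal alternative sketch, not part of the commit, pulls the same files in a single call:

from huggingface_hub import snapshot_download

# Sketch: one-call download of the ControlNet folder, image-encoder folder,
# and IP-adapter weights into ./checkpoints. The allow_patterns list assumes
# the repo layout implied by the filenames used in the commit.
snapshot_download(
    repo_id="briaai/ID_preservation_2.3",
    allow_patterns=["controlnet/*", "image_encoder/*", "ip-adapter.bin"],
    local_dir="./checkpoints",
)

With local_dir set, the files keep their repo-relative paths, so face_adapter, controlnet_path, and the ./checkpoints/image_encoder load above resolve to the same locations.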
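
Downstream of these hunks, the second hunk header shows controlnet = [controlnet_lnmks, controlnet_canny], so controlnet_path presumably feeds a ControlNet load alongside the image-encoder load in the diff. A minimal sketch with the standard diffusers/transformers loaders, assuming those are the classes app.py uses:

import torch
from diffusers import ControlNetModel
from transformers import CLIPVisionModelWithProjection

# Sketch only: load the downloaded checkpoints from ./checkpoints.
# ControlNetModel is an assumption based on the controlnet_* variables in the
# hunk header; the actual pipeline wiring is not part of this diff.
controlnet_lnmks = ControlNetModel.from_pretrained(
    "./checkpoints/controlnet", torch_dtype=torch.float16
)
image_encoder = CLIPVisionModelWithProjection.from_pretrained(
    "./checkpoints/image_encoder", torch_dtype=torch.float16
)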