amildravid4292 committed on
Commit
67694b7
·
verified ·
1 Parent(s): d64a5a9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -15
app.py CHANGED
@@ -20,20 +20,7 @@ from huggingface_hub import snapshot_download
20
  import spaces
21
 
22
 
23
- generator = gr.State()
24
- unet = gr.State()
25
- vae = gr.State()
26
- text_encoder = gr.State()
27
- tokenizer = gr.State()
28
- noise_scheduler = gr.State()
29
- network = gr.State()
30
- #device = gr.State(torch.device("cuda"))
31
- device = "cuda"
32
- #generator = torch.Generator(device=device)
33
- young = gr.State()
34
- pointy = gr.State()
35
- wavy = gr.State()
36
- thick = gr.State()
37
 
38
  models_path = snapshot_download(repo_id="Snapchat/w2w")
39
 
@@ -45,7 +32,7 @@ df = torch.load(f"{models_path}/files/identity_df.pt")
45
  weight_dimensions = torch.load(f"{models_path}/files/weight_dimensions.pt")
46
  pinverse = torch.load(f"{models_path}/files/pinverse_1000pc.pt", map_location=torch.device('cpu')).bfloat16().to(device)
47
 
48
- unet, vae, text_encoder, tokenizer, noise_scheduler = load_models(device)
49
 
50
 
51
 
@@ -408,6 +395,29 @@ intro = """
408
 
409
 
410
  with gr.Blocks(css="style.css") as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
411
  gr.HTML(intro)
412
 
413
  gr.Markdown("""<div style="text-align: justify;"> In this demo, you can get an identity-encoding model by sampling or inverting. To use a model previously downloaded from this demo see \"Uploading a model\" in the Advanced Options. Next, you can generate new images from it, or edit the identity encoded in the model and generate images from the edited model. We provide detailed instructions and tips at the bottom of the page.""")
 
20
  import spaces
21
 
22
 
23
+
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
  models_path = snapshot_download(repo_id="Snapchat/w2w")
26
 
 
32
  weight_dimensions = torch.load(f"{models_path}/files/weight_dimensions.pt")
33
  pinverse = torch.load(f"{models_path}/files/pinverse_1000pc.pt", map_location=torch.device('cpu')).bfloat16().to(device)
34
 
35
+
36
 
37
 
38
 
 
395
 
396
 
397
  with gr.Blocks(css="style.css") as demo:
398
+ generator = gr.State()
399
+ unet = gr.State()
400
+ vae = gr.State()
401
+ text_encoder = gr.State()
402
+ tokenizer = gr.State()
403
+ noise_scheduler = gr.State()
404
+ network = gr.State()
405
+ #device = gr.State(torch.device("cuda"))
406
+ device = "cuda"
407
+ #generator = torch.Generator(device=device)
408
+ young = gr.State()
409
+ pointy = gr.State()
410
+ wavy = gr.State()
411
+ thick = gr.State()
412
+ unet, vae, text_encoder, tokenizer, noise_scheduler = load_models(device)
413
+
414
+
415
+
416
+
417
+
418
+
419
+
420
+
421
  gr.HTML(intro)
422
 
423
  gr.Markdown("""<div style="text-align: justify;"> In this demo, you can get an identity-encoding model by sampling or inverting. To use a model previously downloaded from this demo see \"Uploading a model\" in the Advanced Options. Next, you can generate new images from it, or edit the identity encoded in the model and generate images from the edited model. We provide detailed instructions and tips at the bottom of the page.""")