Spaces: Running on Zero
Update app.py
app.py CHANGED

@@ -19,15 +19,15 @@ from lora_w2w import LoRAw2w
 from huggingface_hub import snapshot_download
 import spaces

-
-
-
-
-
-
-
-
-device = "cuda"
+
+gr.State(generator)
+gr.State(unet)
+gr.State(vae)
+gr.State(text_encoder)
+gr.State(tokenizer)
+gr.State(noise_scheduler)
+gr.State(network)
+device = gr.State("cuda")
 #generator = torch.Generator(device=device)

 models_path = snapshot_download(repo_id="Snapchat/w2w")
@@ -43,10 +43,10 @@ pinverse = torch.load(f"{models_path}/files/pinverse_1000pc.pt", map_location=to
 unet, vae, text_encoder, tokenizer, noise_scheduler = load_models(device)


-
-
-
-
+gr.State(young)
+gr.State(pointy)
+gr.State(wavy)
+gr.State(thick)

 young = get_direction(df, "Young", pinverse, 1000, device)
 young = debias(young, "Male", df, pinverse, device)