Spaces: Running on Zero
Test multiple generations on small models
app.py
CHANGED
@@ -55,23 +55,23 @@ def run_xlnc(prompt, negative_prompt=None, guidance_scale=7.0, pag_scale=3.0, pa
 
     return image, seed
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=15)
 def run_sc(prompt, negative_prompt=None, guidance_scale=7.0, pag_scale=3.0, pag_layers=["mid"], randomize_seed=True, seed=42, progress=gr.Progress(track_tqdm=True)):
     if(randomize_seed):
         seed = random.randint(0, 9007199254740991)
 
     generator = torch.Generator(device="cuda").manual_seed(seed)
-    image = pipe_sc(prompt, negative_prompt=negative_prompt, guidance_scale=guidance_scale, pag_scale=pag_scale, pag_applied_layers=pag_layers, generator=generator, num_inference_steps=25).images
+    image = pipe_sc(prompt, negative_prompt=negative_prompt, guidance_scale=guidance_scale, pag_scale=pag_scale, pag_applied_layers=pag_layers, generator=generator, num_inference_steps=25, num_images_per_prompt=4).images
 
     return image, seed
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=15)
 def run_snc(prompt, negative_prompt=None, guidance_scale=7.0, pag_scale=3.0, pag_layers=["mid"], randomize_seed=True, seed=42, progress=gr.Progress(track_tqdm=True)):
     if(randomize_seed):
         seed = random.randint(0, 9007199254740991)
 
     generator = torch.Generator(device="cuda").manual_seed(seed)
-    image = pipe_snc(prompt, negative_prompt=negative_prompt, guidance_scale=guidance_scale, pag_scale=pag_scale, pag_applied_layers=pag_layers, generator=generator, num_inference_steps=25).images
+    image = pipe_snc(prompt, negative_prompt=negative_prompt, guidance_scale=guidance_scale, pag_scale=pag_scale, pag_applied_layers=pag_layers, generator=generator, num_inference_steps=25, num_images_per_prompt=4).images
 
     return image, seed
 
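The commit makes two changes per function: the spaces.GPU decorator now declares duration=15, a shorter expected ZeroGPU runtime for the small-model paths, and the pipeline calls add num_images_per_prompt=4, so .images returns a list of four images instead of one. Below is a minimal, self-contained sketch of the same pattern; the model name, function name, and Gradio components are illustrative assumptions and are not taken from this Space's app.py.

import random

import gradio as gr
import spaces
import torch
from diffusers import AutoPipelineForText2Image

# Illustrative small model; the real Space builds its own pipe_sc / pipe_snc pipelines.
pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/sd-turbo", torch_dtype=torch.float16
).to("cuda")

@spaces.GPU(duration=15)  # declare a short GPU slot; a small model finishes well within it
def generate(prompt, randomize_seed=True, seed=42):
    if randomize_seed:
        seed = random.randint(0, 9007199254740991)
    generator = torch.Generator(device="cuda").manual_seed(seed)
    # num_images_per_prompt=4 makes .images a list of four PIL images
    images = pipe(
        prompt,
        generator=generator,
        num_inference_steps=4,   # sd-turbo only needs a few steps
        guidance_scale=0.0,      # sd-turbo is meant to run without CFG
        num_images_per_prompt=4,
    ).images
    return images, seed

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    gallery = gr.Gallery(label="Results", columns=2)  # shows all four generations at once
    seed_out = gr.Number(label="Seed")
    prompt.submit(generate, inputs=[prompt], outputs=[gallery, seed_out])

demo.launch()

Because the function now returns a list, the output component on the Gradio side has to be something that accepts multiple images (such as a Gallery) rather than a single Image. The duration argument only declares the expected maximum runtime of the decorated function; keeping it small for fast models should also help with ZeroGPU scheduling.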