ashishtanwer committed on
Commit e0b9f39
1 Parent(s): e3987d7

Create app.py


triton
accelerate
transformers
ftfy
bitsandbytes==0.35.0
natsort
safetensors
xformers
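The list above reads like the Space's pinned dependencies. As a side note (not part of the commit), here is a minimal Python sketch of checking those pins at startup with importlib.metadata, so a missing package or a mismatched bitsandbytes build fails fast:

from importlib.metadata import version, PackageNotFoundError

# Packages taken from the dependency list above; the version check mirrors the
# bitsandbytes==0.35.0 pin. This startup check is an illustrative addition,
# not something the commit itself contains.
required = ["triton", "accelerate", "transformers", "ftfy",
            "bitsandbytes", "natsort", "safetensors", "xformers"]

for name in required:
    try:
        installed = version(name)
    except PackageNotFoundError:
        raise SystemExit(f"Missing dependency: {name}")
    if name == "bitsandbytes" and installed != "0.35.0":
        raise SystemExit(f"bitsandbytes {installed} found, 0.35.0 expected")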

Files changed (1)
  1. app.py +41 -0
app.py ADDED
@@ -0,0 +1,41 @@
+ import torch
+ from torch import autocast
+ from diffusers import StableDiffusionPipeline, DDIMScheduler
+ from IPython.display import display
+
+ # WEIGHTS_DIR is not defined in this file; replace it with the full path of a
+ # previously trained model (e.g. a DreamBooth output folder saved in gdrive).
+ model_path = WEIGHTS_DIR
+
+ pipe = StableDiffusionPipeline.from_pretrained(model_path, safety_checker=None, torch_dtype=torch.float16).to("cuda")
+ pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+ pipe.enable_xformers_memory_efficient_attention()
+ g_cuda = None
+
+ #@markdown Can set a random seed here for reproducibility.
+ g_cuda = torch.Generator(device='cuda')
+ seed = 52362 #@param {type:"number"}
+ g_cuda.manual_seed(seed)
+
+ #@title Run for generating images.
+
+ prompt = "photo of zwx dog in a bucket" #@param {type:"string"}
+ negative_prompt = "" #@param {type:"string"}
+ num_samples = 4 #@param {type:"number"}
+ guidance_scale = 7.5 #@param {type:"number"}
+ num_inference_steps = 24 #@param {type:"number"}
+ height = 512 #@param {type:"number"}
+ width = 512 #@param {type:"number"}
+
+ with autocast("cuda"), torch.inference_mode():
+     images = pipe(
+         prompt,
+         height=height,
+         width=width,
+         negative_prompt=negative_prompt,
+         num_images_per_prompt=num_samples,
+         num_inference_steps=num_inference_steps,
+         guidance_scale=guidance_scale,
+         generator=g_cuda
+     ).images
+
+ for img in images:
+     display(img)
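One caveat on the final loop: IPython.display only renders output inside a notebook, so when app.py runs as a plain script the generated images are never shown. A minimal sketch, reusing the images list produced above, that writes them to disk instead (the outputs directory name is a hypothetical choice, not from the commit):

import os

# pipe(...).images is a list of PIL.Image objects, so each one can be saved directly.
os.makedirs("outputs", exist_ok=True)
for i, img in enumerate(images):
    img.save(os.path.join("outputs", f"sample_{i:02d}.png"))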