awacke1 committed on
Commit
b0592e3
·
verified ·
1 Parent(s): bae3d70

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -33
app.py CHANGED
@@ -44,6 +44,7 @@ USE_TORCH_COMPILE = 0
44
  ENABLE_CPU_OFFLOAD = 0
45
 
46
 
 
47
  if torch.cuda.is_available():
48
  pipe = StableDiffusionXLPipeline.from_pretrained(
49
  "fluently/Fluently-XL-v4",
@@ -58,41 +59,42 @@ if torch.cuda.is_available():
58
 
59
  pipe.to("cuda")
60
 
61
-
62
- def generate(
63
- prompt: str,
64
- negative_prompt: str = "",
65
- use_negative_prompt: bool = False,
66
- seed: int = 0,
67
- width: int = 1024,
68
- height: int = 1024,
69
- guidance_scale: float = 3,
70
- randomize_seed: bool = False,
71
- ):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
 
73
 
74
- seed = int(randomize_seed_fn(seed, randomize_seed))
75
-
76
- if not use_negative_prompt:
77
- negative_prompt = "" # type: ignore
78
-
79
- images = pipe(
80
- prompt=prompt,
81
- negative_prompt=negative_prompt,
82
- width=width,
83
- height=height,
84
- guidance_scale=guidance_scale,
85
- num_inference_steps=20,
86
- num_images_per_prompt=1,
87
- cross_attention_kwargs={"scale": 0.65},
88
- output_type="pil",
89
- ).images
90
- image_paths = [save_image(img, prompt) for img in images]
91
-
92
- download_links = [create_download_link(path) for path in image_paths]
93
-
94
- print(image_paths)
95
- return image_paths, seed, download_links
96
 
97
  examples = [
98
  "a modern hospital room with advanced medical equipment and a patient resting comfortably",
 
44
  ENABLE_CPU_OFFLOAD = 0
45
 
46
 
47
+
48
  if torch.cuda.is_available():
49
  pipe = StableDiffusionXLPipeline.from_pretrained(
50
  "fluently/Fluently-XL-v4",
 
59
 
60
  pipe.to("cuda")
61
 
62
def generate(
    prompt: str,
    negative_prompt: str = "",
    use_negative_prompt: bool = False,
    seed: int = 0,
    width: int = 1024,
    height: int = 1024,
    guidance_scale: float = 3,
    randomize_seed: bool = False,
):
    """Run the SDXL pipeline once and return saved images plus metadata.

    Parameters
    ----------
    prompt : str
        Positive text prompt for the diffusion model.
    negative_prompt : str
        Text to steer generation away from; only used when
        ``use_negative_prompt`` is True, otherwise cleared.
    use_negative_prompt : bool
        Gate for ``negative_prompt``.
    seed : int
        RNG seed; may be replaced when ``randomize_seed`` is True.
    width, height : int
        Output resolution in pixels.
    guidance_scale : float
        Classifier-free guidance strength.
    randomize_seed : bool
        When True, ``randomize_seed_fn`` chooses a fresh seed.

    Returns
    -------
    tuple
        ``(image_paths, seed, download_links)`` — file paths of the saved
        images, the seed actually used, and one download link per image.
    """
    # Resolve the effective seed up front so the value returned to the
    # caller matches what the pipeline actually used.
    # (``randomize_seed_fn`` is defined elsewhere in this file.)
    seed = int(randomize_seed_fn(seed, randomize_seed))

    if not use_negative_prompt:
        negative_prompt = ""  # type: ignore

    # ``pipe`` is the module-level StableDiffusionXLPipeline created under
    # the CUDA-availability check earlier in this file.
    images = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=20,
        num_images_per_prompt=1,
        # NOTE(review): 0.65 LoRA/attention scale is hard-coded — presumably
        # tuned for the Fluently-XL-v4 checkpoint; confirm before changing.
        cross_attention_kwargs={"scale": 0.65},
        output_type="pil",
    ).images

    # Persist each PIL image and build a matching download link per path.
    image_paths = [save_image(img, prompt) for img in images]
    download_links = [create_download_link(path) for path in image_paths]

    # Fix: removed leftover debug ``print(image_paths)`` from the original.
    return image_paths, seed, download_links
94
+ else:
95
+ st.warning("CUDA is not available. The demo may not work on CPU.")
96
 
97
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98
 
99
  examples = [
100
  "a modern hospital room with advanced medical equipment and a patient resting comfortably",