jingyangcarl committed on
Commit 300339e · 1 Parent(s): 6d5599a

debug zerogpu

Files changed (4)
  1. app.py +55 -137
  2. app_.py +0 -72
  3. app_sd.py +154 -0
  4. model.py +2 -0
app.py CHANGED
@@ -1,154 +1,72 @@
- import gradio as gr
- import numpy as np
- import random
-
- import spaces #[uncomment to use ZeroGPU]
- from diffusers import DiffusionPipeline
- import torch
-
- device = "cuda" if torch.cuda.is_available() else "cpu"
- model_repo_id = "stabilityai/sdxl-turbo" # Replace to the model you would like to use
-
- if torch.cuda.is_available():
-     torch_dtype = torch.float16
- else:
-     torch_dtype = torch.float32
-
- pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
- pipe = pipe.to(device)
-
- MAX_SEED = np.iinfo(np.int32).max
- MAX_IMAGE_SIZE = 1024
-
-
- @spaces.GPU #[uncomment to use ZeroGPU]
- def infer(
-     prompt,
-     negative_prompt,
-     seed,
-     randomize_seed,
-     width,
-     height,
-     guidance_scale,
-     num_inference_steps,
-     progress=gr.Progress(track_tqdm=True),
- ):
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-
-     generator = torch.Generator().manual_seed(seed)
-
-     image = pipe(
-         prompt=prompt,
-         negative_prompt=negative_prompt,
-         guidance_scale=guidance_scale,
-         num_inference_steps=num_inference_steps,
-         width=width,
-         height=height,
-         generator=generator,
-     ).images[0]
-
-     return image, seed
-
-
- examples = [
-     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-     "An astronaut riding a green horse",
-     "A delicious ceviche cheesecake slice",
- ]
-
- css = """
- #col-container {
-     margin: 0 auto;
-     max-width: 640px;
- }
- """
-
- with gr.Blocks(css=css) as demo:
-     with gr.Column(elem_id="col-container"):
-         gr.Markdown(" # Text-to-Image Gradio Template")
-
-         with gr.Row():
-             prompt = gr.Text(
-                 label="Prompt",
-                 show_label=False,
-                 max_lines=1,
-                 placeholder="Enter your prompt",
-                 container=False,
-             )
-
-             run_button = gr.Button("Run", scale=0, variant="primary")
-
-         result = gr.Image(label="Result", show_label=False)
-
-         with gr.Accordion("Advanced Settings", open=False):
-             negative_prompt = gr.Text(
-                 label="Negative prompt",
-                 max_lines=1,
-                 placeholder="Enter a negative prompt",
-                 visible=False,
-             )
-
-             seed = gr.Slider(
-                 label="Seed",
-                 minimum=0,
-                 maximum=MAX_SEED,
-                 step=1,
-                 value=0,
-             )
-
-             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-             with gr.Row():
-                 width = gr.Slider(
-                     label="Width",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=1024, # Replace with defaults that work for your model
-                 )
-
-                 height = gr.Slider(
-                     label="Height",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=1024, # Replace with defaults that work for your model
-                 )
-
-             with gr.Row():
-                 guidance_scale = gr.Slider(
-                     label="Guidance scale",
-                     minimum=0.0,
-                     maximum=10.0,
-                     step=0.1,
-                     value=0.0, # Replace with defaults that work for your model
-                 )
-
-                 num_inference_steps = gr.Slider(
-                     label="Number of inference steps",
-                     minimum=1,
-                     maximum=50,
-                     step=1,
-                     value=2, # Replace with defaults that work for your model
-                 )
-
-         gr.Examples(examples=examples, inputs=[prompt])
-     gr.on(
-         triggers=[run_button.click, prompt.submit],
-         fn=infer,
-         inputs=[
-             prompt,
-             negative_prompt,
-             seed,
-             randomize_seed,
-             width,
-             height,
-             guidance_scale,
-             num_inference_steps,
-         ],
-         outputs=[result, seed],
-     )
-
- if __name__ == "__main__":
-     demo.launch()
+ #!/usr/bin/env python
+
+ import gradio as gr
+ import torch
+
+ from app_canny import create_demo as create_demo_canny
+
+ from model import Model
+ from settings import ALLOW_CHANGING_BASE_MODEL, DEFAULT_MODEL_ID, SHOW_DUPLICATE_BUTTON
+
+ DESCRIPTION = "# Material Authoring Demo v0.1. Under Construction"
+
+ if not torch.cuda.is_available():
+     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
+
+ model = Model(base_model_id=DEFAULT_MODEL_ID, task_name="Canny")
+
+ with gr.Blocks() as demo:
+     gr.Markdown(DESCRIPTION)
+     gr.DuplicateButton(
+         value="Duplicate Space for private use",
+         elem_id="duplicate-button",
+         visible=SHOW_DUPLICATE_BUTTON,
+     )
+
+     with gr.Tabs():
+         with gr.Tab("Canny"):
+             create_demo_canny(model.process_canny)
+         with gr.Tab("Texnet"):
+             create_demo_canny(model.process_canny)
+         with gr.Tab("Matnet"):
+             create_demo_canny(model.process_canny)
+
+     with gr.Accordion(label="Base model", open=False):
+         with gr.Row():
+             with gr.Column(scale=5):
+                 current_base_model = gr.Text(label="Current base model")
+             with gr.Column(scale=1):
+                 check_base_model_button = gr.Button("Check current base model")
+         with gr.Row():
+             with gr.Column(scale=5):
+                 new_base_model_id = gr.Text(
+                     label="New base model",
+                     max_lines=1,
+                     placeholder="stable-diffusion-v1-5/stable-diffusion-v1-5",
+                     info="The base model must be compatible with Stable Diffusion v1.5.",
+                     interactive=ALLOW_CHANGING_BASE_MODEL,
+                 )
+             with gr.Column(scale=1):
+                 change_base_model_button = gr.Button("Change base model", interactive=ALLOW_CHANGING_BASE_MODEL)
+         if not ALLOW_CHANGING_BASE_MODEL:
+             gr.Markdown(
+                 """The base model is not allowed to be changed in this Space so as not to slow down the demo, but it can be changed if you duplicate the Space."""
+             )
+
+     check_base_model_button.click(
+         fn=lambda: model.base_model_id,
+         outputs=current_base_model,
+         queue=False,
+         api_name="check_base_model",
+     )
+     gr.on(
+         triggers=[new_base_model_id.submit, change_base_model_button.click],
+         fn=model.set_base_model,
+         inputs=new_base_model_id,
+         outputs=current_base_model,
+         api_name=False,
+         concurrency_id="main",
+     )
+
+ if __name__ == "__main__":
+     demo.queue(max_size=20).launch()
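Note that all three tabs call the same builder with the same model.process_canny handler, so "Texnet" and "Matnet" are currently placeholders for the Canny pipeline. app_canny.py itself is not part of this commit; the only thing app.py pins down is the call shape create_demo(process). A hypothetical sketch under that assumption (component names, inputs, and layout below are illustrative, not from the repo):

import gradio as gr

def create_demo(process):
    # Components instantiated here attach to whichever gr.Tab the caller
    # currently has open, which is how app.py reuses one builder for three tabs.
    with gr.Row():
        with gr.Column():
            image = gr.Image(label="Input image")
            prompt = gr.Textbox(label="Prompt")
            run_button = gr.Button("Run")
        with gr.Column():
            result = gr.Gallery(label="Output")
    run_button.click(fn=process, inputs=[image, prompt], outputs=result)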
app_.py DELETED
@@ -1,72 +0,0 @@
- #!/usr/bin/env python
-
- import gradio as gr
- import torch
-
- from app_canny import create_demo as create_demo_canny
-
- from model import Model
- from settings import ALLOW_CHANGING_BASE_MODEL, DEFAULT_MODEL_ID, SHOW_DUPLICATE_BUTTON
-
- DESCRIPTION = "# Material Authoring Demo v0.1. Under Construction"
-
- if not torch.cuda.is_available():
-     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
-
- model = Model(base_model_id=DEFAULT_MODEL_ID, task_name="Canny")
-
- with gr.Blocks() as demo:
-     gr.Markdown(DESCRIPTION)
-     gr.DuplicateButton(
-         value="Duplicate Space for private use",
-         elem_id="duplicate-button",
-         visible=SHOW_DUPLICATE_BUTTON,
-     )
-
-     with gr.Tabs():
-         with gr.Tab("Canny"):
-             create_demo_canny(model.process_canny)
-         with gr.Tab("Texnet"):
-             create_demo_canny(model.process_canny)
-         with gr.Tab("Matnet"):
-             create_demo_canny(model.process_canny)
-
-     with gr.Accordion(label="Base model", open=False):
-         with gr.Row():
-             with gr.Column(scale=5):
-                 current_base_model = gr.Text(label="Current base model")
-             with gr.Column(scale=1):
-                 check_base_model_button = gr.Button("Check current base model")
-         with gr.Row():
-             with gr.Column(scale=5):
-                 new_base_model_id = gr.Text(
-                     label="New base model",
-                     max_lines=1,
-                     placeholder="stable-diffusion-v1-5/stable-diffusion-v1-5",
-                     info="The base model must be compatible with Stable Diffusion v1.5.",
-                     interactive=ALLOW_CHANGING_BASE_MODEL,
-                 )
-             with gr.Column(scale=1):
-                 change_base_model_button = gr.Button("Change base model", interactive=ALLOW_CHANGING_BASE_MODEL)
-         if not ALLOW_CHANGING_BASE_MODEL:
-             gr.Markdown(
-                 """The base model is not allowed to be changed in this Space so as not to slow down the demo, but it can be changed if you duplicate the Space."""
-             )
-
-     check_base_model_button.click(
-         fn=lambda: model.base_model_id,
-         outputs=current_base_model,
-         queue=False,
-         api_name="check_base_model",
-     )
-     gr.on(
-         triggers=[new_base_model_id.submit, change_base_model_button.click],
-         fn=model.set_base_model,
-         inputs=new_base_model_id,
-         outputs=current_base_model,
-         api_name=False,
-         concurrency_id="main",
-     )
-
- if __name__ == "__main__":
-     demo.queue(max_size=20).launch()
app_sd.py ADDED
@@ -0,0 +1,154 @@
+ import gradio as gr
+ import numpy as np
+ import random
+
+ import spaces #[uncomment to use ZeroGPU]
+ from diffusers import DiffusionPipeline
+ import torch
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model_repo_id = "stabilityai/sdxl-turbo" # Replace to the model you would like to use
+
+ if torch.cuda.is_available():
+     torch_dtype = torch.float16
+ else:
+     torch_dtype = torch.float32
+
+ pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
+ pipe = pipe.to(device)
+
+ MAX_SEED = np.iinfo(np.int32).max
+ MAX_IMAGE_SIZE = 1024
+
+
+ @spaces.GPU #[uncomment to use ZeroGPU]
+ def infer(
+     prompt,
+     negative_prompt,
+     seed,
+     randomize_seed,
+     width,
+     height,
+     guidance_scale,
+     num_inference_steps,
+     progress=gr.Progress(track_tqdm=True),
+ ):
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)
+
+     generator = torch.Generator().manual_seed(seed)
+
+     image = pipe(
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         guidance_scale=guidance_scale,
+         num_inference_steps=num_inference_steps,
+         width=width,
+         height=height,
+         generator=generator,
+     ).images[0]
+
+     return image, seed
+
+
+ examples = [
+     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
+     "An astronaut riding a green horse",
+     "A delicious ceviche cheesecake slice",
+ ]
+
+ css = """
+ #col-container {
+     margin: 0 auto;
+     max-width: 640px;
+ }
+ """
+
+ with gr.Blocks(css=css) as demo:
+     with gr.Column(elem_id="col-container"):
+         gr.Markdown(" # Text-to-Image Gradio Template")
+
+         with gr.Row():
+             prompt = gr.Text(
+                 label="Prompt",
+                 show_label=False,
+                 max_lines=1,
+                 placeholder="Enter your prompt",
+                 container=False,
+             )
+
+             run_button = gr.Button("Run", scale=0, variant="primary")
+
+         result = gr.Image(label="Result", show_label=False)
+
+         with gr.Accordion("Advanced Settings", open=False):
+             negative_prompt = gr.Text(
+                 label="Negative prompt",
+                 max_lines=1,
+                 placeholder="Enter a negative prompt",
+                 visible=False,
+             )
+
+             seed = gr.Slider(
+                 label="Seed",
+                 minimum=0,
+                 maximum=MAX_SEED,
+                 step=1,
+                 value=0,
+             )
+
+             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+             with gr.Row():
+                 width = gr.Slider(
+                     label="Width",
+                     minimum=256,
+                     maximum=MAX_IMAGE_SIZE,
+                     step=32,
+                     value=1024, # Replace with defaults that work for your model
+                 )
+
+                 height = gr.Slider(
+                     label="Height",
+                     minimum=256,
+                     maximum=MAX_IMAGE_SIZE,
+                     step=32,
+                     value=1024, # Replace with defaults that work for your model
+                 )
+
+             with gr.Row():
+                 guidance_scale = gr.Slider(
+                     label="Guidance scale",
+                     minimum=0.0,
+                     maximum=10.0,
+                     step=0.1,
+                     value=0.0, # Replace with defaults that work for your model
+                 )
+
+                 num_inference_steps = gr.Slider(
+                     label="Number of inference steps",
+                     minimum=1,
+                     maximum=50,
+                     step=1,
+                     value=2, # Replace with defaults that work for your model
+                 )
+
+         gr.Examples(examples=examples, inputs=[prompt])
+     gr.on(
+         triggers=[run_button.click, prompt.submit],
+         fn=infer,
+         inputs=[
+             prompt,
+             negative_prompt,
+             seed,
+             randomize_seed,
+             width,
+             height,
+             guidance_scale,
+             num_inference_steps,
+         ],
+         outputs=[result, seed],
+     )
+
+ if __name__ == "__main__":
+     demo.launch()
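One subtlety worth noting in the template preserved above: torch.Generator().manual_seed(seed) builds a CPU generator even when the pipeline sits on CUDA. Diffusers accepts that (initial latents are drawn on the generator's device and then moved), which keeps a given seed reproducible across machines; a device-pinned generator is the variant to reach for if the noise should be drawn on the GPU itself. A small sketch, reusing the template's device variable:

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

# CPU generator, as in app_sd.py: seed-stable across CPU and GPU machines.
cpu_gen = torch.Generator().manual_seed(42)

# Device-pinned variant: random draws happen directly on `device`.
dev_gen = torch.Generator(device=device).manual_seed(42)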
model.py CHANGED
@@ -4,6 +4,7 @@ import numpy as np
  import PIL.Image
  import torch
  from controlnet_aux.util import HWC3
+ import spaces #[uncomment to use ZeroGPU]
  from diffusers import (
      ControlNetModel,
      DiffusionPipeline,
@@ -122,6 +123,7 @@ class Model:
              image=control_image,
          ).images

+     @spaces.GPU #[uncomment to use ZeroGPU]
      @torch.inference_mode()
      def process_canny(
          self,
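The model.py change is the crux of this "debug zerogpu" commit: on a ZeroGPU Space, a GPU is attached only for the duration of calls into functions decorated with @spaces.GPU, so the decorator has to sit on the entry point that actually touches CUDA (here, process_canny). A minimal, self-contained sketch of the pattern; the duration argument is optional and the number below is illustrative:

import spaces  # provided on Hugging Face ZeroGPU Spaces
import torch

@spaces.GPU(duration=60)  # request a GPU slot for up to ~60 s per call
@torch.inference_mode()
def run_on_gpu(x: torch.Tensor) -> torch.Tensor:
    # CUDA is attached only while this function runs; at module import
    # time on ZeroGPU, torch.cuda.is_available() reports False.
    return (x.to("cuda") * 2).cpu()

The decorator order mirrors the diff: @spaces.GPU is outermost, so the GPU is acquired before inference mode wraps the call.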