shichen1231 committed on
Commit
9f9752e
·
0 Parent(s):

Duplicate from ioclab/illumination_composition_controlnet

Browse files
.fqapp copy ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from PIL import Image
import gradio as gr
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
import torch
torch.backends.cuda.matmul.allow_tf32 = True
import gc

# Two ControlNets are stacked: the first conditions on illumination/brightness,
# the second on semantic segmentation maps.
controlnet = [
    ControlNetModel.from_pretrained(
        "ioclab/connow",
        torch_dtype=torch.float16,
        use_safetensors=True,
    ),
    ControlNetModel.from_pretrained(
        "lllyasviel/control_v11p_sd15_seg",
        torch_dtype=torch.float16,
    ),
]

# Anything-v4.0 base model driven by the multi-ControlNet stack above.
# The safety checker is disabled for this demo.
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "andite/anything-v4.0",
    controlnet=controlnet,
    torch_dtype=torch.float16,
    safety_checker=None,
)

pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

# pipe.enable_xformers_memory_efficient_attention()
# pipe.enable_model_cpu_offload()
# pipe.enable_attention_slicing()
def infer(
    prompt,
    negative_prompt,
    conditioning_image,
    seg_image,
    num_inference_steps=30,
    size=768,
    guidance_scale=7.0,
    seed=1234,
    ill=0.6,
    seg=1,
):
    """Generate one image from the dual-ControlNet pipeline.

    Args:
        prompt: positive text prompt.
        negative_prompt: negative text prompt.
        conditioning_image: numpy array with the illumination conditioning image.
        seg_image: numpy array with the segmentation conditioning image.
        num_inference_steps: number of denoising steps.
        size: output height and width in pixels (square output).
        guidance_scale: classifier-free guidance scale.
        seed: RNG seed; -1 means "pick a random seed".
        ill: conditioning scale for the illumination ControlNet.
        seg: conditioning scale for the segmentation ControlNet.

    Returns:
        The generated PIL.Image.
    """
    conditioning_image = Image.fromarray(conditioning_image)
    seg_image = Image.fromarray(seg_image)
    g_cpu = torch.Generator()

    # -1 acts as a "random seed" sentinel: draw a fresh seed from the generator.
    if seed == -1:
        generator = g_cpu.manual_seed(g_cpu.seed())
    else:
        generator = g_cpu.manual_seed(seed)

    control_images = [conditioning_image, seg_image]
    output_image = pipe(
        prompt,
        control_images,
        height=size,
        width=size,
        num_inference_steps=num_inference_steps,
        generator=generator,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        # One scale per ControlNet, in the same order as `control_images`.
        controlnet_conditioning_scale=[ill, seg],
    ).images[0]

    # BUG FIX: the original executed
    #   del conditioning_image, conditioning_image_raw, seg_image
    # but `conditioning_image_raw` is never defined (the line creating it is
    # commented out), so every call raised NameError after generation.
    # Delete only names that actually exist.
    del conditioning_image, seg_image
    gc.collect()

    return output_image
# Gradio UI: prompt/negative-prompt text boxes, two conditioning images,
# advanced sliders, and a result pane wired to `infer`.
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # ControlNet on Brightness

        This is a demo on ControlNet based on brightness.
        """)

    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(
                label="Prompt",
            )
            negative_prompt = gr.Textbox(
                label="Negative Prompt",
            )
            conditioning_image = gr.Image(
                label="Conditioning Image",
            )
            seg_image = gr.Image(
                label="(Optional)seg Image",
            )
            with gr.Accordion('Advanced options', open=False):
                with gr.Row():
                    num_inference_steps = gr.Slider(
                        10, 40, 20,
                        step=1,
                        label="Steps",
                    )
                    size = gr.Slider(
                        256, 768, 512,
                        step=128,
                        label="Size",
                    )
                with gr.Row():
                    guidance_scale = gr.Slider(
                        label='Guidance Scale',
                        minimum=0.1,
                        maximum=30.0,
                        value=7.0,
                        step=0.1
                    )
                    seed = gr.Slider(
                        label='Seed',
                        value=-1,
                        minimum=-1,
                        maximum=2147483647,
                        step=1,
                        # randomize=True
                    )
                with gr.Row():
                    ill = gr.Slider(
                        label='controlnet_ILL_scale',
                        minimum=0,
                        maximum=1,
                        value=0.6,
                        step=0.05
                    )
                    seg = gr.Slider(
                        label='controlnet_SEG_scale',
                        value=1,
                        minimum=0,
                        maximum=1,
                        step=0.1,
                        # randomize=True
                    )
            submit_btn = gr.Button(
                value="Submit",
                variant="primary"
            )
        with gr.Column(min_width=300):
            output = gr.Image(
                label="Result",
            )

    submit_btn.click(
        fn=infer,
        inputs=[
            prompt, negative_prompt, conditioning_image, seg_image,
            num_inference_steps, size, guidance_scale, seed, ill, seg,
        ],
        outputs=output
    )
    # BUG FIX: "[Nature Dataset] (https://…)" had a space between the link
    # text and the URL, which breaks Markdown link rendering.
    gr.Markdown(
        """
        * [Dataset](https://huggingface.co/datasets/ioclab/grayscale_image_aesthetic_3M) Note that this was handled extra, and a preview version of the processing is here
        [Anime Dataset](https://huggingface.co/datasets/ioclab/lighttestout) [Nature Dataset](https://huggingface.co/datasets/ioclab/light)
        * [Diffusers model](https://huggingface.co/ioclab/connow/tree/main), [Web UI model](https://huggingface.co/ioclab/control_v1u_sd15_illumination_webui)
        * [Training Report](https://huggingface.co/ioclab/control_v1u_sd15_illumination_webui), [Doc(Chinese)](https://aigc.ioclab.com/sd-showcase/light_controlnet.html)
        """)

demo.launch()
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
35
+ *.png filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+
2
+ .idea
3
+
4
+ venv
5
+ .venv
6
+
7
+ gradio_cached_examples
README.md ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Brightness ControlNet
3
+ emoji: 💻
4
+ colorFrom: red
5
+ colorTo: blue
6
+ sdk: gradio
7
+ sdk_version: 3.27.0
8
+ app_file: app.py
9
+ pinned: false
10
+ tags:
11
+ - jax-diffusers-event
12
+ duplicated_from: ioclab/illumination_composition_controlnet
13
+ ---
14
+
15
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from PIL import Image
import gradio as gr
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
import torch
torch.backends.cuda.matmul.allow_tf32 = True
import gc

# Single ControlNet conditioned on illumination/brightness.
controlnet = ControlNetModel.from_pretrained(
    "ioclab/connow",
    torch_dtype=torch.float16,
    use_safetensors=True,
)

# Anything-v4.0 base model; the safety checker is disabled for this demo.
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "andite/anything-v4.0",
    controlnet=controlnet,
    torch_dtype=torch.float16,
    safety_checker=None,
)

pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

# Memory-saving options enabled in this variant of the app.
pipe.enable_xformers_memory_efficient_attention()
pipe.enable_model_cpu_offload()
pipe.enable_attention_slicing()
def infer(
    prompt,
    negative_prompt,
    conditioning_image,
    num_inference_steps=30,
    size=768,
    guidance_scale=7.0,
    seed=1234,
    ill=0.6,
):
    """Generate one image from the brightness-ControlNet pipeline.

    Args:
        prompt: positive text prompt.
        negative_prompt: negative text prompt.
        conditioning_image: numpy array with the illumination conditioning image.
        num_inference_steps: number of denoising steps.
        size: output height and width in pixels (square output).
        guidance_scale: classifier-free guidance scale.
        seed: RNG seed; -1 means "pick a random seed".
        ill: conditioning scale for the illumination ControlNet.

    Returns:
        The generated PIL.Image.
    """
    conditioning_image = Image.fromarray(conditioning_image)
    g_cpu = torch.Generator()

    # -1 acts as a "random seed" sentinel: draw a fresh seed from the generator.
    if seed == -1:
        generator = g_cpu.manual_seed(g_cpu.seed())
    else:
        generator = g_cpu.manual_seed(seed)

    output_image = pipe(
        prompt,
        conditioning_image,
        height=size,
        width=size,
        num_inference_steps=num_inference_steps,
        generator=generator,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        controlnet_conditioning_scale=ill,
    ).images[0]

    # BUG FIX: the original executed
    #   del conditioning_image, conditioning_image_raw, seg_image
    # but neither `conditioning_image_raw` nor `seg_image` exists in this
    # function (copied over from the two-ControlNet variant), so every call
    # raised NameError after generation. Delete only what exists here.
    del conditioning_image
    gc.collect()

    return output_image
# Gradio UI: prompt/negative-prompt text boxes, one conditioning image,
# advanced sliders, and a result pane wired to `infer`.
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # ControlNet on Brightness

        This is a demo on ControlNet based on brightness.
        """)

    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(
                label="Prompt",
            )
            negative_prompt = gr.Textbox(
                label="Negative Prompt",
            )
            conditioning_image = gr.Image(
                label="Conditioning Image",
            )
            with gr.Accordion('Advanced options', open=False):
                with gr.Row():
                    num_inference_steps = gr.Slider(
                        10, 40, 20,
                        step=1,
                        label="Steps",
                    )
                    size = gr.Slider(
                        256, 768, 512,
                        step=128,
                        label="Size",
                    )
                with gr.Row():
                    guidance_scale = gr.Slider(
                        label='Guidance Scale',
                        minimum=0.1,
                        maximum=30.0,
                        value=7.0,
                        step=0.1
                    )
                    seed = gr.Slider(
                        label='Seed',
                        value=-1,
                        minimum=-1,
                        maximum=2147483647,
                        step=1,
                        # randomize=True
                    )
                with gr.Row():
                    ill = gr.Slider(
                        label='controlnet_ILL_scale',
                        minimum=0,
                        maximum=1,
                        value=0.6,
                        step=0.05
                    )

            submit_btn = gr.Button(
                value="Submit",
                variant="primary"
            )
        with gr.Column(min_width=300):
            output = gr.Image(
                label="Result",
            )

    submit_btn.click(
        fn=infer,
        inputs=[
            prompt, negative_prompt, conditioning_image,
            num_inference_steps, size, guidance_scale, seed, ill,
        ],
        outputs=output
    )
    # BUG FIX: "[Nature Dataset] (https://…)" had a space between the link
    # text and the URL, which breaks Markdown link rendering.
    gr.Markdown(
        """
        * [Dataset](https://huggingface.co/datasets/ioclab/grayscale_image_aesthetic_3M) Note that this was handled extra, and a preview version of the processing is here
        [Anime Dataset](https://huggingface.co/datasets/ioclab/lighttestout) [Nature Dataset](https://huggingface.co/datasets/ioclab/light)
        * [Diffusers model](https://huggingface.co/ioclab/connow/tree/main), [Web UI model](https://huggingface.co/ioclab/control_v1u_sd15_illumination_webui)
        * [Training Report](https://huggingface.co/ioclab/control_v1u_sd15_illumination_webui), [Doc(Chinese)](https://aigc.ioclab.com/sd-showcase/light_controlnet.html)
        """)

demo.launch()
conditioning_images/ill1.png ADDED

Git LFS Details

  • SHA256: 0ac4e0d3a3b40a57dbe83f3f93972f1617969f13d37954cd9622664a18ee3859
  • Pointer size: 131 Bytes
  • Size of remote file: 118 kB
conditioning_images/ill2.png ADDED

Git LFS Details

  • SHA256: d39fc8c33f48565b2f4c2829d728defdc5c4d96dfabc2a9c577e87120202354a
  • Pointer size: 131 Bytes
  • Size of remote file: 212 kB
conditioning_images/ill3.png ADDED

Git LFS Details

  • SHA256: a8ed56a4c3b00031e156d32e630405126e24c323170a258d17ade8ce956e109f
  • Pointer size: 131 Bytes
  • Size of remote file: 329 kB
conditioning_images/seg1.png ADDED

Git LFS Details

  • SHA256: 235c2dd751a02ce2d36c882815341626dd946dea978fe5872b7e682d06740e3f
  • Pointer size: 130 Bytes
  • Size of remote file: 60.5 kB
conditioning_images/seg2.png ADDED

Git LFS Details

  • SHA256: 2f2e2fc1aaa28cfcbfe33589b84d66fffc682547cd8c68a85639bcdff4bf6719
  • Pointer size: 130 Bytes
  • Size of remote file: 86.6 kB
conditioning_images/yl.jpg ADDED
conditioning_images/yl1.png ADDED

Git LFS Details

  • SHA256: d4fc27f0367503a7f20324379db7fe4248e59ce2b9b7d50fc984e9c8fdaea2de
  • Pointer size: 132 Bytes
  • Size of remote file: 2.73 MB
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+
2
+ accelerate
3
+ diffusers
4
+ transformers
5
+ torch
6
+ xformers
7
+ safetensors