panelforge committed
Commit 1bc5792 · verified · 1 Parent(s): 6bc27bc

Update app.py

Files changed (1):
app.py +78 -143
app.py CHANGED
@@ -1,174 +1,109 @@
 
 
 import gradio as gr
 import numpy as np
 import random
-
-import spaces  # [uncomment to use ZeroGPU]
-from diffusers import DiffusionPipeline, EulerAncestralDiscreteScheduler
 import torch
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "John6666/wai-ani-nsfw-ponyxl-v140-sdxl"  # Replace with your desired model
-
-torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
 
 pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
-pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
 pipe = pipe.to(device)
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
-@spaces.GPU  # [uncomment to use ZeroGPU]
-def infer(
-    prompt,
-    negative_prompt,
-    positive_prefix,
-    negative_prefix,
-    seed,
-    randomize_seed,
-    width,
-    height,
-    guidance_scale,
-    num_inference_steps,
-    progress=gr.Progress(track_tqdm=True),
-):
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
     generator = torch.Generator().manual_seed(seed)
 
-    # Combine prefixes with prompts
-    full_prompt = positive_prefix + " " + prompt
-    full_negative_prompt = negative_prefix + " " + negative_prompt
-
     image = pipe(
-        prompt=full_prompt,
         negative_prompt=full_negative_prompt,
         guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
         width=width,
         height=height,
-        generator=generator,
     ).images[0]
 
-    return image, seed
-
-examples = [
-    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-    "An astronaut riding a green horse",
-    "A delicious ceviche cheesecake slice",
-]
-
-css = """
-#col-container {
-    margin: 0 auto;
-    max-width: 640px;
-}
-"""
-
-with gr.Blocks(css=css) as demo:
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown(" # Text-to-Image Gradio Template")
-
-        with gr.Row():
-            prompt = gr.Textbox(
-                label="Prompt",
-                show_label=False,
-                max_lines=5,
-                placeholder="Enter your prompt",
-                container=False,
-            )
-
-            run_button = gr.Button("Run", scale=0, variant="primary")
-
-        result = gr.Image(label="Result", show_label=False)
-
-        with gr.Accordion("Advanced Settings", open=False):
-            negative_prompt = gr.Textbox(
-                label="Negative prompt",
-                max_lines=1,
-                placeholder="Enter a negative prompt",
-                visible=True,
-            )
-
-            seed = gr.Slider(
-                label="Seed",
-                minimum=0,
-                maximum=MAX_SEED,
-                step=1,
-                value=0,
-            )
-
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-            with gr.Row():
-                positive_prefix = gr.Textbox(
-                    label="Positive prompt prefix",
-                    max_lines=3,
-                    placeholder="e.g., score_9, score_8_up, score_7_up, source_anime, ",
-                    value="score_9, score_8_up, score_7_up, source_anime, ",
-                )
-
-                negative_prefix = gr.Textbox(
-                    label="Negative prompt prefix",
-                    max_lines=3,
-                    placeholder="e.g., worst quality, bad quality, jpeg artifacts, source_cartoon, 3d, (censor), monochrome,blurry, lowres, watermark, ",
-                    value="bad quality, worst quality, bad quality, jpeg artifacts, source_cartoon, 3d, (censor), monochrome,blurry, lowres, watermark, ",
-                )
-
-            with gr.Row():
-                width = gr.Slider(
-                    label="Width",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=1024,
-                )
-
-                height = gr.Slider(
-                    label="Height",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=1024,
-                )
-
-            with gr.Row():
-                guidance_scale = gr.Slider(
-                    label="Guidance scale",
-                    minimum=5.0,
-                    maximum=7.0,
-                    step=0.1,
-                    value=6.0,  # Adjusted default
-                )
-
-                num_inference_steps = gr.Slider(
-                    label="Number of inference steps",
-                    minimum=20,
-                    maximum=30,
-                    step=1,
-                    value=25,
-                )
-
-        gr.Examples(examples=examples, inputs=[prompt])
-
-    gr.on(
-        triggers=[run_button.click, prompt.submit],
         fn=infer,
         inputs=[
-            prompt,
-            negative_prompt,
-            positive_prefix,
-            negative_prefix,
-            seed,
-            randomize_seed,
-            width,
-            height,
-            guidance_scale,
-            num_inference_steps,
         ],
-        outputs=[result, seed],
     )
 
-if __name__ == "__main__":
-    demo.launch()

+# app.py
+
 import gradio as gr
 import numpy as np
 import random
 import torch
+from diffusers import DiffusionPipeline
+from tags import TAGS
 
+# ========== Model Setup ==========
 device = "cuda" if torch.cuda.is_available() else "cpu"
+torch_dtype = torch.float16 if device == "cuda" else torch.float32
+model_repo_id = "John6666/wai-ani-nsfw-ponyxl-v8-sdxl"
 
 pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
 pipe = pipe.to(device)
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
+# ========== Inference Function ==========
+def infer(prompt, negative_prompt, seed, randomize_seed, width, height,
+          guidance_scale, num_inference_steps, *tag_selections, active_tab):
+
+    if active_tab == "Prompt Input":
+        final_prompt = f"score_9, score_8_up, score_7_up, source_anime, {prompt}"
+    else:
+        all_tags = []
+        for (group_name, tag_dict), selected_keys in zip(TAGS.items(), tag_selections):
+            all_tags.extend([tag_dict[key] for key in selected_keys])
+        tag_text = ", ".join(all_tags)
+        final_prompt = f"score_9, score_8_up, score_7_up, source_anime, {tag_text}"
+
+    additional_negatives = "worst quality, bad quality, jpeg artifacts, source_cartoon, 3d, (censor), monochrome, blurry, lowres, watermark"
+    full_negative_prompt = f"{additional_negatives}, {negative_prompt}"
+
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
     generator = torch.Generator().manual_seed(seed)
 
     image = pipe(
+        prompt=final_prompt,
         negative_prompt=full_negative_prompt,
         guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
         width=width,
         height=height,
+        generator=generator
     ).images[0]
 
+    return image, seed, f"Prompt used: {final_prompt}\nNegative prompt used: {full_negative_prompt}"
+
+# ========== Gradio UI ==========
+with gr.Blocks(css="""
+#col-container { max-width: 1280px; margin: auto; }
+#left-column, #right-column { display: inline-block; vertical-align: top; width: 48%; padding: 1%; }
+#run-button { width: 100%; }
+""") as demo:
+
+    with gr.Row():
+        with gr.Column(elem_id="left-column"):
+            gr.Markdown("# Rainbow Media X")
+
+            result = gr.Image(label="Result", show_label=False)
+            prompt_info = gr.Textbox(label="Prompts Used", lines=3, interactive=False)
+
+            with gr.Accordion("Advanced Settings", open=False):
+                negative_prompt = gr.Textbox(label="Negative Prompt", max_lines=1, placeholder="Enter a negative prompt")
+                seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
+                randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
+
+                with gr.Row():
+                    width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=768)
+                    height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=768)
+
+                with gr.Row():
+                    guidance_scale = gr.Slider(label="Guidance Scale", minimum=0.0, maximum=10.0, step=0.1, value=7.0)
+                    num_inference_steps = gr.Slider(label="Inference Steps", minimum=1, maximum=50, step=1, value=35)
+
+            run_button = gr.Button("Run", elem_id="run-button")
+
+        with gr.Column(elem_id="right-column"):
+            active_tab = gr.State("Prompt Input")
+
+            with gr.Tabs() as tabs:
+                with gr.TabItem("Prompt Input") as prompt_tab:
+                    prompt = gr.Textbox(label="Prompt", placeholder="Enter a custom prompt", lines=3)
+                    prompt_tab.select(lambda: "Prompt Input", outputs=active_tab)
+
+                with gr.TabItem("Tag Selection") as tag_tab:
+                    tag_checkboxes = []
+                    for group_name, tag_dict in TAGS.items():
+                        checkbox = gr.CheckboxGroup(choices=list(tag_dict.keys()), label=group_name)
+                        tag_checkboxes.append(checkbox)
+                    tag_tab.select(lambda: "Tag Selection", outputs=active_tab)
+
+    run_button.click(
         fn=infer,
         inputs=[
+            prompt, negative_prompt, seed, randomize_seed, width, height,
+            guidance_scale, num_inference_steps,
+            *tag_checkboxes,
+            active_tab
         ],
+        outputs=[result, seed, prompt_info]
    )
 
+demo.queue().launch()
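
Note: the new script imports TAGS from a tags module that is not part of this commit. From the way the code consumes it (TAGS.items() yields group names mapped to dicts; the inner keys label the CheckboxGroup choices and the inner values are the strings spliced into the prompt), a minimal sketch of the assumed tags.py shape would be:

    # tags.py -- hypothetical sketch; the actual file is not included in this commit.
    # Each top-level key becomes one gr.CheckboxGroup; inner keys are the checkbox
    # labels, inner values are the tag strings joined into the final prompt.
    TAGS = {
        "Style": {
            "Watercolor": "watercolor (medium)",
            "Sketch": "sketch, monochrome",
        },
        "Setting": {
            "Forest": "forest, outdoors",
            "Night": "night, dark sky",
        },
    }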
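
Note: Gradio passes the inputs list to the callback positionally, and because active_tab is declared after *tag_selections it is a keyword-only parameter in Python, so the gr.State value would be absorbed into tag_selections and the call would raise a TypeError for the missing active_tab. A sketch of one way to keep the same inputs list while unpacking everything positionally (hypothetical, not part of this commit):

    def infer(prompt, negative_prompt, seed, randomize_seed, width, height,
              guidance_scale, num_inference_steps, *rest):
        # Gradio supplies every input positionally, so `rest` holds the
        # CheckboxGroup selections followed by the active-tab state value.
        *tag_selections, active_tab = rest
        ...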