import gradio as gr
import spaces
import torch
import os
from compel import Compel, ReturnedEmbeddingsType
from diffusers import DiffusionPipeline
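# Expected dependencies (e.g. in the Space's requirements.txt): gradio, spaces,
# torch, diffusers, compel, and transformers for the SDXL text encoders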

# Load the model (override the default with the MODEL_NAME environment variable)
model_name = os.environ.get('MODEL_NAME', 'UnfilteredAI/NSFW-gen-v2')
pipe = DiffusionPipeline.from_pretrained(
    model_name,
    torch_dtype=torch.float16
)
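# Move the fp16 pipeline to the GPU; on Hugging Face ZeroGPU Spaces this works
# together with the @spaces.GPU decorator on the generation function below.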
pipe.to('cuda')

# Compel setup: weighted-prompt embeddings for SDXL's two text encoders
compel = Compel(
  tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
  text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
  returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
  requires_pooled=[False, True]
)

# Default negative prompt
default_negative_prompt = "(low quality, worst quality:1.2), very displeasing, 3d, watermark, signature, ugly, poorly drawn, (deformed | distorted | disfigured:1.3), bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers:1.4, disconnected limbs, blurry, amputation."

# Example prompts
example_prompts = [
    ["a beautiful woman in a summer dress at the beach, golden sunset, professional photography, 8k", default_negative_prompt, 40, 7.5, 1024, 1024, 4],
    ["portrait of a cyberpunk character, highly detailed, neon lights, futuristic cityscape in background, 8k, ultra realistic", default_negative_prompt, 50, 8.0, 768, 1024, 4],
    ["detailed fantasy art of magical forest with fairies, ethereal lighting, mystical atmosphere", default_negative_prompt, 60, 7.0, 1024, 768, 4],
    ["photorealistic portrait of a stunning model, studio lighting, fashion photography", default_negative_prompt, 45, 7.0, 1024, 1024, 4],
]

# Image generation function; @spaces.GPU requests a ZeroGPU device for up to 120 seconds per call
@spaces.GPU(duration=120)
def generate(prompt, negative_prompt, num_inference_steps, guidance_scale, width, height, num_samples, progress=gr.Progress()):
    progress(0, desc="Preparing")
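    # Compel returns (embeddings, pooled embeddings) because requires_pooled=[False, True]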
    embeds, pooled = compel(prompt)
    neg_embeds, neg_pooled = compel(negative_prompt)
    
    progress(0.1, desc="Generating images")

    # diffusers' callback_on_step_end receives (pipeline, step_index, timestep,
    # callback_kwargs) and must return callback_kwargs; use it for per-step progress.
    def on_step_end(pipeline, step_index, timestep, callback_kwargs):
        progress((step_index + 1) / num_inference_steps, desc="Generating images")
        return callback_kwargs

    images = pipe(
        prompt_embeds=embeds,
        pooled_prompt_embeds=pooled,
        negative_prompt_embeds=neg_embeds,
        negative_pooled_prompt_embeds=neg_pooled,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        width=width,
        height=height,
        num_images_per_prompt=num_samples,
        callback_on_step_end=on_step_end
    ).images
    
    return images

# CSS styles
css = """
.gallery-item {
    transition: transform 0.2s;
    box-shadow: 0 4px 8px rgba(0,0,0,0.1);
    border-radius: 10px;
}
.gallery-item:hover {
    transform: scale(1.03);
    box-shadow: 0 8px 16px rgba(0,0,0,0.2);
}
.container {
    max-width: 1200px;
    margin: auto;
}
.header {
    text-align: center;
    margin-bottom: 2rem;
    padding: 1rem;
    background: linear-gradient(90deg, rgba(76,0,161,0.8) 0%, rgba(28,110,164,0.8) 100%);
    border-radius: 10px;
    color: white;
}
.slider-container {
    background-color: #f5f5f5;
    padding: 1rem;
    border-radius: 10px;
    margin-bottom: 1rem;
}
.prompt-container {
    background-color: #f0f8ff;
    padding: 1rem;
    border-radius: 10px;
    margin-bottom: 1rem;
    border: 1px solid #d0e8ff;
}
.examples-header {
    background: linear-gradient(90deg, rgba(41,128,185,0.7) 0%, rgba(142,68,173,0.7) 100%);
    color: white;
    padding: 0.5rem;
    border-radius: 8px;
    text-align: center;
    margin-bottom: 0.5rem;
}
"""

# Gradio interface
with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
    gr.HTML("""
    <div class="header">
        <h1>🎨 UnfilteredAI NSFW-gen-v2 Image Generator</h1>
        <p>Enter creative prompts and generate high-quality images.</p>
    </div>
    """)
    
    with gr.Row():
        with gr.Column(scale=2):
            with gr.Group(elem_classes="prompt-container"):
                prompt = gr.Textbox(label="Prompt", placeholder="Describe your desired image...", lines=3)
                negative_prompt = gr.Textbox(
                    label="Negative Prompt", 
                    value=default_negative_prompt,
                    lines=3
                )
            
            with gr.Group(elem_classes="slider-container"):
                with gr.Row():
                    with gr.Column():
                        steps = gr.Slider(minimum=20, maximum=100, value=60, step=1, label="Inference Steps (Quality)", info="Higher values improve quality (longer generation time)")
                        guidance = gr.Slider(minimum=1, maximum=15, value=7, step=0.1, label="Guidance Scale (Creativity)", info="Lower values create more creative results")
                    
                    with gr.Column():
                        with gr.Row():
                            width = gr.Slider(minimum=512, maximum=1536, value=1024, step=128, label="Width")
                            height = gr.Slider(minimum=512, maximum=1536, value=1024, step=128, label="Height")
                        
                        num_samples = gr.Slider(minimum=1, maximum=8, value=4, step=1, label="Number of Images", info="Number of images to generate at once")
            
            generate_btn = gr.Button("🚀 Generate Images", variant="primary", size="lg")
        
        with gr.Column(scale=3):
            output_gallery = gr.Gallery(label="Generated Images", elem_classes="gallery-item", columns=2, object_fit="contain", height=650)
    
    gr.HTML("""<div class="examples-header"><h3>✨ Example Prompts</h3></div>""")
    gr.Examples(
        examples=example_prompts,
        inputs=[prompt, negative_prompt, steps, guidance, width, height, num_samples],
        outputs=output_gallery,
        fn=generate,
        cache_examples=True,  # runs generate on each example at startup and caches the outputs
    )
    
    # Event connections
    generate_btn.click(
        fn=generate,
        inputs=[prompt, negative_prompt, steps, guidance, width, height, num_samples],
        outputs=output_gallery
    )
    
    gr.HTML("""
    <div style="text-align: center; margin-top: 20px; padding: 10px; background-color: #f0f0f0; border-radius: 10px;">
        <p>💡 Tip: For high-quality images, use detailed prompts and higher inference steps.</p>
        <p>Example: Add quality terms like "professional photography, 8k, highly detailed, sharp focus, HDR" to your prompts.</p>
    </div>
    """)

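# gr.Progress requires the Gradio queue; it is enabled by default in Gradio 4+,
# on older Gradio versions call demo.queue() before launching.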
demo.launch()