seawolf2357 committed on
Commit
8a86340
·
verified ·
1 Parent(s): 2579011

Update app.py

Files changed (1)
  1. app.py +172 -201
app.py CHANGED
@@ -6,8 +6,10 @@ from diffusers.utils import load_image
  from PIL import Image
  import os

- # Style dictionary
- style_type_lora_dict = {
      "3D_Chibi": "3D_Chibi_lora_weights.safetensors",
      "American_Cartoon": "American_Cartoon_lora_weights.safetensors",
      "Chinese_Ink": "Chinese_Ink_lora_weights.safetensors",
@@ -32,260 +34,229 @@ style_type_lora_dict = {
      "Rick_Morty": "Rick_Morty_lora_weights.safetensors"
  }

- # Initialize pipeline globally
- pipeline = None

  def load_pipeline():
-     global pipeline
-     if pipeline is None:
-         gr.Info("Loading FLUX.1-Kontext model...")
-         # Load FLUX.1-Kontext-dev model
-         pipeline = FluxKontextPipeline.from_pretrained(
-             "black-forest-labs/FLUX.1-Kontext-dev",
-             torch_dtype=torch.bfloat16
          )
-     return pipeline

- @spaces.GPU(duration=120)
- def style_transfer(input_image, style_name, prompt_suffix, num_inference_steps, guidance_scale, seed):
-     """
-     Apply style transfer to the input image using selected style
-     """
      if input_image is None:
-         gr.Warning("Please upload an image first!")
          return None
-
      try:
-         # Load pipeline and move to GPU
          pipe = load_pipeline()
-         pipe = pipe.to('cuda')
-
-         # Enable memory efficient settings
-         pipe.enable_model_cpu_offload()
-
-         # Set seed for reproducibility
-         if seed > 0:
-             generator = torch.Generator(device="cuda").manual_seed(seed)
-         else:
-             generator = torch.manual_seed(42)
-
-         # Process input image
-         if isinstance(input_image, str):
-             image = load_image(input_image)
-         else:
-             image = input_image
-
-         # Resize to 1024x1024 (required for Kontext)
-         image = image.convert("RGB").resize((1024, 1024), Image.Resampling.LANCZOS)
-
-         # Load the selected LoRA
-         gr.Info(f"Loading {style_name} style...")
-         lora_filename = style_type_lora_dict[style_name]
-
-         # Load LoRA weights directly from the repository
          pipe.load_lora_weights(
              "Owen777/Kontext-Style-Loras",
-             weight_name=lora_filename
          )
-         pipe.set_adapters(["default"], adapter_weights=[1.0])
-
-         # Create prompt for style transformation
-         style_name_readable = style_name.replace('_', ' ')
-         prompt = f"Turn this image into the {style_name_readable} style."
          if prompt_suffix and prompt_suffix.strip():
              prompt += f" {prompt_suffix.strip()}"
-
-         gr.Info("Generating styled image...")
-
-         # Generate the styled image with Kontext pipeline
          result = pipe(
-             image=image,
              prompt=prompt,
-             guidance_scale=guidance_scale,
-             num_inference_steps=num_inference_steps,
              generator=generator,
              height=1024,
-             width=1024
          )
-
-         # Clear LoRA and GPU memory
          pipe.unload_lora_weights()
          torch.cuda.empty_cache()
-
          return result.images[0]
-
      except Exception as e:
-         gr.Error(f"Error during style transfer: {str(e)}")
          torch.cuda.empty_cache()
          return None

- # Style descriptions
- style_descriptions = {
-     "3D_Chibi": "Cute, miniature 3D character style with big heads",
-     "American_Cartoon": "Classic American animation style",
-     "Chinese_Ink": "Traditional Chinese ink painting aesthetic",
-     "Clay_Toy": "Playful clay/plasticine toy appearance",
-     "Fabric": "Soft, textile-like rendering",
-     "Ghibli": "Studio Ghibli's distinctive anime style",
-     "Irasutoya": "Simple, flat Japanese illustration style",
-     "Jojo": "JoJo's Bizarre Adventure manga style",
-     "Oil_Painting": "Classic oil painting texture and strokes",
-     "Pixel": "Retro pixel art style",
-     "Snoopy": "Peanuts comic strip style",
-     "Poly": "Low-poly 3D geometric style",
-     "LEGO": "LEGO brick construction style",
-     "Origami": "Paper folding art style",
-     "Pop_Art": "Bold, colorful pop art style",
-     "Van_Gogh": "Van Gogh's expressive brushstroke style",
-     "Paper_Cutting": "Paper cut-out art style",
-     "Line": "Clean line art/sketch style",
-     "Vector": "Clean vector graphics style",
-     "Picasso": "Cubist art style inspired by Picasso",
-     "Macaron": "Soft, pastel macaron-like style",
-     "Rick_Morty": "Rick and Morty cartoon style"
- }

- # Create Gradio interface
  with gr.Blocks(title="FLUX.1 Kontext Style Transfer", theme=gr.themes.Soft()) as demo:
-     gr.Markdown("""
- # 🎨 FLUX.1 Kontext Style Transfer
-
- Transform your images into various artistic styles using FLUX.1-Kontext-dev and high-quality style LoRAs.
-
- This demo uses the official Owen777/Kontext-Style-Loras collection with 22 different artistic styles!
- """)
-
      with gr.Row():
          with gr.Column(scale=1):
-             input_image = gr.Image(
-                 label="Upload Image",
-                 type="pil",
-                 height=400
-             )
-
              style_dropdown = gr.Dropdown(
-                 choices=list(style_type_lora_dict.keys()),
                  value="Ghibli",
                  label="Select Style",
-                 info="Choose from 22 different artistic styles"
              )
-
              style_info = gr.Textbox(
                  label="Style Description",
-                 value=style_descriptions["Ghibli"],
                  interactive=False,
-                 lines=2
              )
-
              prompt_suffix = gr.Textbox(
                  label="Additional Instructions (Optional)",
-                 placeholder="Add extra details like 'make it more colorful' or 'add dramatic lighting'...",
-                 lines=2
              )
-
              with gr.Accordion("Advanced Settings", open=False):
                  num_steps = gr.Slider(
-                     minimum=10,
-                     maximum=50,
                      value=24,
                      step=1,
                      label="Inference Steps",
-                     info="More steps = better quality but slower"
                  )
-
                  guidance = gr.Slider(
-                     minimum=1.0,
-                     maximum=5.0,
                      value=2.5,
                      step=0.1,
                      label="Guidance Scale",
-                     info="How closely to follow the prompt (2.5 recommended)"
-                 )
-
-                 seed = gr.Number(
-                     label="Seed",
-                     value=42,
-                     info="Set to 0 for random results"
                  )
-
-             generate_btn = gr.Button("🎨 Transform Image", variant="primary", size="lg")
-
          with gr.Column(scale=1):
-             output_image = gr.Image(
-                 label="Styled Result",
-                 type="pil",
-                 height=400
              )
-
-             gr.Markdown("""
- ### 💡 Tips:
- - All images are resized to 1024x1024
- - First run downloads the model (~7GB)
- - Each style transformation takes ~30-60 seconds
- - Try different styles to find the best match!
- - Use additional instructions for fine control
- """)
-
-     # Update style description when style changes
-     def update_description(style):
-         return style_descriptions.get(style, "")
-
-     style_dropdown.change(
-         fn=update_description,
-         inputs=[style_dropdown],
-         outputs=[style_info]
-     )
-
-     # Examples
-     gr.Examples(
-         examples=[
-             ["https://huggingface.co/datasets/black-forest-labs/kontext-bench/resolve/main/test/images/0003.jpg", "Ghibli", ""],
-             ["https://huggingface.co/datasets/black-forest-labs/kontext-bench/resolve/main/test/images/0003.jpg", "3D_Chibi", "make it extra cute"],
-             ["https://huggingface.co/datasets/black-forest-labs/kontext-bench/resolve/main/test/images/0003.jpg", "Van_Gogh", "with swirling sky"],
-             ["https://huggingface.co/datasets/black-forest-labs/kontext-bench/resolve/main/test/images/0003.jpg", "Pixel", "8-bit retro game style"],
-             ["https://huggingface.co/datasets/black-forest-labs/kontext-bench/resolve/main/test/images/0003.jpg", "Chinese_Ink", "mountain landscape"],
-             ["https://huggingface.co/datasets/black-forest-labs/kontext-bench/resolve/main/test/images/0003.jpg", "LEGO", "colorful blocks"],
          ],
-         inputs=[input_image, style_dropdown, prompt_suffix],
-         outputs=output_image,
-         fn=lambda img, style, prompt: style_transfer(img, style, prompt, 24, 2.5, 42),
-         cache_examples=False
      )
-
-     # Connect the generate button
-     generate_btn.click(
-         fn=style_transfer,
-         inputs=[input_image, style_dropdown, prompt_suffix, num_steps, guidance, seed],
-         outputs=output_image
      )
-
-     gr.Markdown("""
- ---
- ### 📚 Available Styles:
-
- **Anime/Cartoon**: Ghibli, American Cartoon, Jojo, Snoopy, Rick & Morty, Irasutoya
-
- **3D/Geometric**: 3D Chibi, Poly, LEGO, Clay Toy
-
- **Traditional Art**: Chinese Ink, Oil Painting, Van Gogh, Picasso, Pop Art
-
- **Craft/Material**: Fabric, Origami, Paper Cutting, Macaron
-
- **Digital/Modern**: Pixel, Line, Vector
-
- ---
-
- ### 🚀 How it works:
- 1. Upload any image
- 2. Select a style from the dropdown
- 3. (Optional) Add custom instructions
- 4. Click "Transform Image" and wait ~30-60 seconds
- 5. Download your styled image!
-
- ---
-
- Created with ❤️ using [Owen777/Kontext-Style-Loras](https://huggingface.co/Owen777/Kontext-Style-Loras)
- """)

  if __name__ == "__main__":
-     demo.launch()
  from PIL import Image
  import os

+ # ----------------------------------------------
+ # Style → LoRA file mapping
+ # ----------------------------------------------
+ STYLE_LORA_MAP = {
      "3D_Chibi": "3D_Chibi_lora_weights.safetensors",
      "American_Cartoon": "American_Cartoon_lora_weights.safetensors",
      "Chinese_Ink": "Chinese_Ink_lora_weights.safetensors",

      "Rick_Morty": "Rick_Morty_lora_weights.safetensors"
  }

+ # ----------------------------------------------
+ # Style descriptions (shown in the UI)
+ # ----------------------------------------------
+ STYLE_DESCRIPTIONS = {
+     "3D_Chibi": "Cute SD-proportioned characters with a 3D look",
+     "American_Cartoon": "Classic American cartoon style",
+     "Chinese_Ink": "Traditional Chinese ink-wash painting",
+     "Clay_Toy": "Clay/plasticine toy look",
+     "Fabric": "Soft textile and fabric texture",
+     "Ghibli": "Ghibli-style warm colors and soft lines",
+     "Irasutoya": "Irasutoya-style minimal flat illustration",
+     "Jojo": "JoJo's Bizarre Adventure manga touch",
+     "Oil_Painting": "Oil-painting brushstrokes and texture",
+     "Pixel": "16/32-bit retro pixel art",
+     "Snoopy": "Peanuts comic strip (Snoopy) style",
+     "Poly": "Low-poly 3D geometric style",
+     "LEGO": "LEGO brick-building style",
+     "Origami": "Paper-folding texture and angles",
+     "Pop_Art": "Vivid pop-art colors and halftone dots",
+     "Van_Gogh": "Van Gogh's thick impasto brushwork",
+     "Paper_Cutting": "Paper-cut silhouette style",
+     "Line": "Clean line drawing",
+     "Vector": "Vector graphics / flat design",
+     "Picasso": "Picasso-style Cubism",
+     "Macaron": "Soft pastel macaron tones",
+     "Rick_Morty": "Rick and Morty animation style"
+ }
+
+ # Global pipeline (loaded lazily)
+ pipe = None
+
+
+ def get_dtype():
+     """Pick the best dtype: bf16 if the GPU supports it, otherwise fp16."""
+     if torch.cuda.is_available():
+         major, _ = torch.cuda.get_device_capability()
+         if major >= 8:  # compute capability 8.x+ (Ampere/Ada/Hopper) supports bf16
+             return torch.bfloat16
+     return torch.float16
+

  def load_pipeline():
+     """Load the FluxKontext pipeline lazily."""
+     global pipe
+     if pipe is None:
+         gr.Info("⬇️ Downloading the FLUX.1-Kontext model…")
+
+         pipe = FluxKontextPipeline.from_pretrained(
+             "black-forest-labs/FLUX.1-Kontext-dev",
+             torch_dtype=get_dtype(),
+             resume_download=True,
          )
+
+         device = "cuda" if torch.cuda.is_available() else "cpu"
+         pipe.to(device)
+
+         # VRAM-saving settings
+         if torch.cuda.is_available():
+             pipe.enable_sequential_cpu_offload()
+             pipe.vae.enable_tiling()
+
+     return pipe
+
+
+ @spaces.GPU(duration=600)  # leave headroom for the initial model download
+ def style_transfer(
+     input_image,
+     style_name,
+     prompt_suffix,
+     num_inference_steps,
+     guidance_scale,
+     seed,
+ ):
+     """Transform the input image into the selected style."""
+
      if input_image is None:
+         gr.Warning("🖼️ Please upload an image first!")
          return None
+
      try:
          pipe = load_pipeline()
+
+         # Seed handling (0 = random)
+         generator = None
+         if seed and int(seed) != 0:
+             generator = torch.Generator(device=pipe.device).manual_seed(int(seed))
+
+         # Load & resize the input image
+         img = load_image(input_image) if isinstance(input_image, str) else input_image
+         img = img.convert("RGB").resize((1024, 1024), Image.Resampling.LANCZOS)
+
+         # Load the selected style LoRA
+         lora_file = STYLE_LORA_MAP[style_name]
+         adapter_name = "style"
          pipe.load_lora_weights(
              "Owen777/Kontext-Style-Loras",
+             weight_name=lora_file,
+             adapter_name=adapter_name,
          )
+         pipe.set_adapters([adapter_name], adapter_weights=[1.0])
+
+         # Build the prompt
+         prompt = f"Turn this image into the {style_name.replace('_', ' ')} style."
          if prompt_suffix and prompt_suffix.strip():
              prompt += f" {prompt_suffix.strip()}"
+
+         gr.Info("🎨 Generating the styled image…")
+
          result = pipe(
+             image=img,
              prompt=prompt,
+             guidance_scale=float(guidance_scale),
+             num_inference_steps=int(num_inference_steps),
              generator=generator,
              height=1024,
+             width=1024,
          )
+
+         # Unload the LoRA and free GPU memory
          pipe.unload_lora_weights()
          torch.cuda.empty_cache()
+
          return result.images[0]
+
      except Exception as e:
+         gr.Error(f"🚨 Error: {e}")
          torch.cuda.empty_cache()
          return None

+ def update_description(style):
+     return STYLE_DESCRIPTIONS.get(style, "")
+
+
  with gr.Blocks(title="FLUX.1 Kontext Style Transfer", theme=gr.themes.Soft()) as demo:
+     gr.Markdown(
+         """
+ # 🎨 FLUX.1 Kontext Style Transfer
+ Transform your images into a wide range of artistic styles with the FLUX.1-Kontext-dev model and 22 high-quality LoRAs.
+         """
+     )
+
      with gr.Row():
          with gr.Column(scale=1):
+             input_image = gr.Image(label="Upload Image", type="pil", height=400)
+
              style_dropdown = gr.Dropdown(
+                 choices=list(STYLE_LORA_MAP.keys()),
                  value="Ghibli",
                  label="Select Style",
+                 info="Choose from 22 different artistic styles",
              )
+
              style_info = gr.Textbox(
                  label="Style Description",
+                 value=STYLE_DESCRIPTIONS["Ghibli"],
                  interactive=False,
+                 lines=2,
              )
+
              prompt_suffix = gr.Textbox(
                  label="Additional Instructions (Optional)",
+                 placeholder="e.g. 'make it more colorful', 'add dramatic lighting' …",
+                 lines=2,
              )
+
              with gr.Accordion("Advanced Settings", open=False):
                  num_steps = gr.Slider(
+                     10,
+                     50,
                      value=24,
                      step=1,
                      label="Inference Steps",
+                     info="Higher = better quality but slower",
                  )
                  guidance = gr.Slider(
+                     1.0,
+                     7.5,
                      value=2.5,
                      step=0.1,
                      label="Guidance Scale",
+                     info="How closely the output follows the prompt",
                  )
+                 seed = gr.Number(label="Seed (0 = Random)", value=0)
+
+             generate_btn = gr.Button(
+                 "🎨 Transform Image", variant="primary", size="lg"
+             )
+
          with gr.Column(scale=1):
+             output_image = gr.Image(label="Styled Result", type="pil", height=400)
+
+             gr.Markdown(
+                 """### 💡 Tips:
+ - All images are resized to 1024×1024.
+ - The first run needs to download the ~7 GB model.
+ - Each style transfer takes about 30-60 seconds.
+ - Try other styles as well!"""
              )
+
+     # Event bindings
+     style_dropdown.change(update_description, [style_dropdown], [style_info])
+
+     generate_btn.click(
+         style_transfer,
+         inputs=[
+             input_image,
+             style_dropdown,
+             prompt_suffix,
+             num_steps,
+             guidance,
+             seed,
          ],
+         outputs=[output_image],
      )
+
+     gr.Markdown(
+         """
+ ---
+ Created with ❤️ by [Black-Forest Labs](https://huggingface.co/black-forest-labs) &
+ [Owen777/Kontext-Style-Loras](https://huggingface.co/Owen777/Kontext-Style-Loras)
+         """
+     )

  if __name__ == "__main__":
+     demo.launch()
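For reference, the core generation flow that the updated app.py wraps can be reproduced outside Gradio with a few lines of diffusers code. This is only a minimal sketch: the model ID, LoRA repository, weight-file naming pattern, and call arguments are taken from the diff above, while the input path `input.jpg`, the Ghibli style choice, and the plain `.to("cuda")` placement (the Space itself runs on ZeroGPU with CPU offload) are illustrative assumptions.

```python
import torch
from diffusers import FluxKontextPipeline
from diffusers.utils import load_image
from PIL import Image

# Load the Kontext editing pipeline in bf16 and move it to the GPU.
pipe = FluxKontextPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Kontext-dev",
    torch_dtype=torch.bfloat16,
).to("cuda")

# Attach one style LoRA from the collection used by the Space.
pipe.load_lora_weights(
    "Owen777/Kontext-Style-Loras",
    weight_name="Ghibli_lora_weights.safetensors",  # assumed name, follows the STYLE_LORA_MAP pattern
    adapter_name="style",
)
pipe.set_adapters(["style"], adapter_weights=[1.0])

# Kontext works on 1024x1024 RGB images, as in the app.
image = load_image("input.jpg").convert("RGB").resize(
    (1024, 1024), Image.Resampling.LANCZOS
)

# Same prompt template and defaults as the app's style_transfer().
result = pipe(
    image=image,
    prompt="Turn this image into the Ghibli style.",
    guidance_scale=2.5,
    num_inference_steps=24,
    generator=torch.Generator(device="cuda").manual_seed(42),
    height=1024,
    width=1024,
)
result.images[0].save("styled.png")
```

Swapping `weight_name` for another file that follows the same naming pattern (for example `Pixel_lora_weights.safetensors`) selects a different style; the prompt template stays the same.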