seawolf2357 committed
Commit 31a30b8 · verified · 1 Parent(s): 5f18011

Update app.py

Files changed (1)
  1. app.py +177 -171
app.py CHANGED
@@ -1,60 +1,13 @@
-"""
-FLUX.1 Kontext Style Transfer
-=============================
-Updated: 2025-07-12 (HF_TOKEN support + complete code)
----------------------------------------------------
-A Gradio demo that converts images into 22 artistic styles.
-- Automatically detects the **HF_TOKEN** environment variable to prevent download errors for the licensed model.
-- Downloads the model and LoRAs into the cache on the first run; no re-downloads afterwards.
-- Detects GPU VRAM and uses FP16 + CPU offload when below 24 GB.
-- Pipeline/LoRA loading messages are shown only on the first run.
-"""
-
-import os
 import gradio as gr
 import spaces
 import torch
-from huggingface_hub import snapshot_download
-from huggingface_hub.errors import LocalTokenNotFoundError
 from diffusers import FluxKontextPipeline
 from diffusers.utils import load_image
 from PIL import Image
+import os
 
-# ------------------------------------------------------------------
-# Environment setup & model / LoRA pre-download
-# ------------------------------------------------------------------
-os.environ.setdefault("HF_HUB_ENABLE_HF_TRANSFER", "1")  # faster downloads
-
-MODEL_ID = "black-forest-labs/FLUX.1-Kontext-dev"
-LORA_REPO = "Owen777/Kontext-Style-Loras"
-CACHE_DIR = os.getenv("HF_HOME", os.path.expanduser("~/.cache/huggingface"))
-HF_TOKEN = os.getenv("HF_TOKEN")  # inject at runtime or use Secrets
-
-
-def _download_with_token(repo_id: str) -> str:
-    """Download repo snapshot with optional token handling."""
-    try:
-        return snapshot_download(
-            repo_id=repo_id,
-            cache_dir=CACHE_DIR,
-            resume_download=True,
-            token=HF_TOKEN if HF_TOKEN else True,  # True → use the logged-in session
-        )
-    except LocalTokenNotFoundError:
-        raise RuntimeError(
-            "A Hugging Face token is required. Set the HF_TOKEN environment variable\n"
-            "or log in with `huggingface-cli login`."
-        )
-
-
-# Downloaded to the cache only on the first run (returns immediately if already present)
-MODEL_DIR = _download_with_token(MODEL_ID)
-LORA_DIR = _download_with_token(LORA_REPO)
-
-# ------------------------------------------------------------------
-# Style → LoRA file mapping & descriptions
-# ------------------------------------------------------------------
-STYLE_LORA_MAP = {
+# Style dictionary
+style_type_lora_dict = {
     "3D_Chibi": "3D_Chibi_lora_weights.safetensors",
     "American_Cartoon": "American_Cartoon_lora_weights.safetensors",
     "Chinese_Ink": "Chinese_Ink_lora_weights.safetensors",
@@ -76,10 +29,11 @@ STYLE_LORA_MAP = {
     "Vector": "Vector_lora_weights.safetensors",
     "Picasso": "Picasso_lora_weights.safetensors",
     "Macaron": "Macaron_lora_weights.safetensors",
-    "Rick_Morty": "Rick_Morty_lora_weights.safetensors",
+    "Rick_Morty": "Rick_Morty_lora_weights.safetensors"
 }
 
-STYLE_DESCRIPTIONS = {
+# Style descriptions
+style_descriptions = {
     "3D_Chibi": "Cute, miniature 3D character style with big heads",
     "American_Cartoon": "Classic American animation style",
     "Chinese_Ink": "Traditional Chinese ink painting aesthetic",
@@ -101,181 +55,233 @@ STYLE_DESCRIPTIONS = {
     "Vector": "Clean vector graphics style",
     "Picasso": "Cubist art style inspired by Picasso",
     "Macaron": "Soft, pastel macaron-like style",
-    "Rick_Morty": "Rick and Morty cartoon style",
+    "Rick_Morty": "Rick and Morty cartoon style"
 }
 
-# ------------------------------------------------------------------
-# Pipeline loader (singleton)
-# ------------------------------------------------------------------
-_pipeline = None
-
+# Initialize pipeline globally
+pipeline = None
+pipeline_loaded = False
 
 def load_pipeline():
-    """Load or return cached FluxKontextPipeline."""
-    global _pipeline
-    if _pipeline is not None:
-        return _pipeline
-
-    # Determine VRAM → dtype & offload settings
-    vram_gb = torch.cuda.get_device_properties(0).total_memory / 1024**3
-    dtype = torch.bfloat16 if vram_gb >= 24 else torch.float16
-
-    gr.Info("Loading the FLUX.1-Kontext pipeline… (first run only)")
-
-    pipe = FluxKontextPipeline.from_pretrained(
-        MODEL_DIR,
-        torch_dtype=dtype,
-        local_files_only=True,
-    ).to("cuda")
-
-    if vram_gb < 24:
-        pipe.enable_sequential_cpu_offload()
-    else:
-        pipe.enable_model_cpu_offload()
-
-    _pipeline = pipe
-    return _pipeline
-
-# ------------------------------------------------------------------
-# Style transfer function
-# ------------------------------------------------------------------
-@spaces.GPU(duration=600)
+    global pipeline, pipeline_loaded
+    if pipeline is None:
+        print("Loading FLUX.1-Kontext-dev model...")
+        # Auto-detect HF_TOKEN
+        token = os.getenv("HF_TOKEN", True)
+
+        pipeline = FluxKontextPipeline.from_pretrained(
+            "black-forest-labs/FLUX.1-Kontext-dev",
+            torch_dtype=torch.bfloat16,
+            use_auth_token=token
+        )
+        pipeline_loaded = True
+    return pipeline
 
+@spaces.GPU(duration=120)
 def style_transfer(input_image, style_name, prompt_suffix, num_inference_steps, guidance_scale, seed):
-    """Apply selected style to the uploaded image."""
+    """
+    Apply style transfer to the input image using selected style
+    """
     if input_image is None:
         gr.Warning("Please upload an image first!")
         return None
-
+
     try:
+        # Load pipeline and move to GPU
        pipe = load_pipeline()
-
-        # Torch Generator (reproducible when the seed is fixed)
+        pipe = pipe.to('cuda')
+
+        # Enable memory efficient settings
+        pipe.enable_model_cpu_offload()
+
+        # Set seed for reproducibility
         generator = None
-        if seed and int(seed) > 0:
-            generator = torch.Generator(device="cuda").manual_seed(int(seed))
-
-        # Preprocess the input image
-        img = input_image if isinstance(input_image, Image.Image) else load_image(input_image)
-        img = img.convert("RGB").resize((1024, 1024), Image.Resampling.LANCZOS)
-
-        # Load the LoRA
-        lora_file = STYLE_LORA_MAP[style_name]
-        adapter_name = "style"
-        pipe.load_lora_weights(LORA_DIR, weight_name=lora_file, adapter_name=adapter_name)
-        pipe.set_adapters([adapter_name], [1.0])
-
-        # Build the prompt
-        readable_style = style_name.replace("_", " ")
-        prompt = f"Turn this image into the {readable_style} style."
+        if seed > 0:
+            generator = torch.Generator(device="cuda").manual_seed(seed)
+
+        # Process input image
+        if isinstance(input_image, str):
+            image = load_image(input_image)
+        else:
+            image = input_image
+
+        # Ensure RGB and resize to 1024x1024
+        image = image.convert("RGB").resize((1024, 1024), Image.Resampling.LANCZOS)
+
+        # Load the selected LoRA
+        lora_filename = style_type_lora_dict[style_name]
+
+        # Clear any previously loaded LoRA
+        try:
+            pipe.unload_lora_weights()
+        except:
+            pass
+
+        # Load LoRA weights
+        pipe.load_lora_weights(
+            "Owen777/Kontext-Style-Loras",
+            weight_name=lora_filename,
+            adapter_name="style"
+        )
+        pipe.set_adapters(["style"], adapter_weights=[1.0])
+
+        # Create prompt for style transformation
+        style_name_readable = style_name.replace('_', ' ')
+        prompt = f"Turn this image into the {style_name_readable} style."
         if prompt_suffix and prompt_suffix.strip():
             prompt += f" {prompt_suffix.strip()}"
-
-        gr.Info("Generating styled image… (20-60 s)")
-
+
+        print(f"Generating with prompt: {prompt}")
+
+        # Generate the styled image
         result = pipe(
-            image=img,
+            image=image,
             prompt=prompt,
-            guidance_scale=float(guidance_scale),
-            num_inference_steps=int(num_inference_steps),
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_inference_steps,
             generator=generator,
             height=1024,
-            width=1024,
+            width=1024
         )
-
-        # Unload the LoRA & free memory
-        pipe.unload_lora_weights(adapter_name=adapter_name)
+
+        # Clear GPU memory
         torch.cuda.empty_cache()
-
+
         return result.images[0]
-
+
     except Exception as e:
+        print(f"Error: {str(e)}")
+        gr.Error(f"Error during style transfer: {str(e)}")
         torch.cuda.empty_cache()
-        gr.Error(f"Error during style transfer: {e}")
         return None
 
-# ------------------------------------------------------------------
-# Gradio UI
-# ------------------------------------------------------------------
-
-def update_description(style):
-    return STYLE_DESCRIPTIONS.get(style, "")
-
-
+# Create Gradio interface
 with gr.Blocks(title="FLUX.1 Kontext Style Transfer", theme=gr.themes.Soft()) as demo:
-    gr.Markdown(
-        """
-        # 🎨 FLUX.1 Kontext Style Transfer
-
-        Transform your uploaded image into 22 artistic styles!
-        (The model / LoRA is downloaded only on the first run; later runs are fast.)
-        """
-    )
-
+    gr.Markdown("""
+    # 🎨 FLUX.1 Kontext Style Transfer
+
+    Transform your images into various artistic styles using FLUX.1-Kontext-dev and high-quality style LoRAs.
+
+    This demo uses the official Owen777/Kontext-Style-Loras collection with 22 different artistic styles!
+    """)
+
     with gr.Row():
         with gr.Column(scale=1):
-            input_image = gr.Image(label="Upload Image", type="pil", height=400)
+            input_image = gr.Image(
+                label="Upload Image",
+                type="pil",
+                height=400
+            )
+
            style_dropdown = gr.Dropdown(
-                choices=list(STYLE_LORA_MAP.keys()),
+                choices=list(style_type_lora_dict.keys()),
                 value="Ghibli",
                 label="Select Style",
+                info="Choose from 22 different artistic styles"
             )
+
             style_info = gr.Textbox(
                 label="Style Description",
-                value=STYLE_DESCRIPTIONS["Ghibli"],
+                value=style_descriptions["Ghibli"],
                 interactive=False,
-                lines=2,
+                lines=2
             )
+
             prompt_suffix = gr.Textbox(
                 label="Additional Instructions (Optional)",
-                placeholder="e.g. add dramatic lighting",
-                lines=2,
+                placeholder="Add extra details like 'make it more colorful' or 'add dramatic lighting'...",
+                lines=2
            )
-
+
             with gr.Accordion("Advanced Settings", open=False):
-                num_steps = gr.Slider(10, 50, value=24, step=1, label="Inference Steps")
-                guidance = gr.Slider(1.0, 7.5, value=2.5, step=0.1, label="Guidance Scale")
-                seed = gr.Number(label="Seed (0 = random)", value=42)
-
+                num_steps = gr.Slider(
+                    minimum=10,
+                    maximum=50,
+                    value=24,
+                    step=1,
+                    label="Inference Steps",
+                    info="More steps = better quality but slower"
+                )
+
+                guidance = gr.Slider(
+                    minimum=1.0,
+                    maximum=5.0,
+                    value=2.5,
+                    step=0.1,
+                    label="Guidance Scale",
+                    info="How closely to follow the prompt (2.5 recommended)"
+                )
+
+                seed = gr.Number(
+                    label="Seed",
+                    value=42,
+                    precision=0,
+                    info="Set to 0 for random results"
+                )
+
             generate_btn = gr.Button("🎨 Transform Image", variant="primary", size="lg")
-
+
         with gr.Column(scale=1):
-            output_image = gr.Image(label="Styled Result", type="pil", height=400)
-            gr.Markdown(
-                """
-                ### 💡 Tips
-                - The model (7 GB) and LoRAs are downloaded only on the first run.
-                - Images are resized to 1024×1024 before processing.
-                - With VRAM < 24 GB, FP16 + CPU offload is applied automatically.
-                - Change the seed value to get a variety of results!
-                """
+            output_image = gr.Image(
+                label="Styled Result",
+                type="pil",
+                height=400
             )
-
-    # Auto-update the style description
-    style_dropdown.change(update_description, inputs=[style_dropdown], outputs=[style_info])
-
-    # Example samples
+
+            gr.Markdown("""
+            ### 💡 Tips:
+            - All images are resized to 1024x1024
+            - First run downloads the model (~12GB)
+            - Each style transformation takes ~30-60 seconds
+            - Try different styles to find the best match!
+            - Use additional instructions for fine control
+            """)
+
+    # Update style description when style changes
+    def update_description(style):
+        return style_descriptions.get(style, "")
+
+    style_dropdown.change(
+        fn=update_description,
+        inputs=[style_dropdown],
+        outputs=[style_info]
+    )
+
+    # Examples
     gr.Examples(
         examples=[
             ["https://huggingface.co/datasets/black-forest-labs/kontext-bench/resolve/main/test/images/0003.jpg", "Ghibli", ""],
             ["https://huggingface.co/datasets/black-forest-labs/kontext-bench/resolve/main/test/images/0003.jpg", "3D_Chibi", "make it extra cute"],
             ["https://huggingface.co/datasets/black-forest-labs/kontext-bench/resolve/main/test/images/0003.jpg", "Van_Gogh", "with swirling sky"],
             ["https://huggingface.co/datasets/black-forest-labs/kontext-bench/resolve/main/test/images/0003.jpg", "Pixel", "8-bit retro game style"],
-            ["https://huggingface.co/datasets/black-forest-labs/kontext-bench/resolve/main/test/images/0003.jpg", "Chinese_Ink", "mountain landscape"],
-            ["https://huggingface.co/datasets/black-forest-labs/kontext-bench/resolve/main/test/images/0003.jpg", "LEGO", "colorful blocks"],
         ],
         inputs=[input_image, style_dropdown, prompt_suffix],
         outputs=output_image,
-        fn=lambda img, style, prompt: style_transfer(img, style, prompt, 24, 2.5, 42),
-        cache_examples=False,
+        fn=style_transfer,
+        cache_examples=False
    )
-
-    # Wire up the button click
+
+    # Connect the generate button
     generate_btn.click(
         fn=style_transfer,
         inputs=[input_image, style_dropdown, prompt_suffix, num_steps, guidance, seed],
-        outputs=output_image,
+        outputs=output_image
     )
+
+    gr.Markdown("""
+    ---
+    ### 📚 Available Styles:
+
+    **Anime/Cartoon**: Ghibli, American Cartoon, Jojo, Snoopy, Rick & Morty, Irasutoya
+    **3D/Geometric**: 3D Chibi, Poly, LEGO, Clay Toy
+    **Traditional Art**: Chinese Ink, Oil Painting, Van Gogh, Picasso, Pop Art
+    **Craft/Material**: Fabric, Origami, Paper Cutting, Macaron
+    **Digital/Modern**: Pixel, Line, Vector
+
+    ---
+
+    Created with ❤️ using [Owen777/Kontext-Style-Loras](https://huggingface.co/Owen777/Kontext-Style-Loras)
+    """)
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
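
For a quick local check of the updated app.py, a minimal sketch like the one below can drive style_transfer() directly, outside the Gradio UI; it is not part of the commit. It assumes the Space's dependencies (gradio, spaces, torch, diffusers) are installed, a CUDA GPU is available, HF_TOKEN is exported so the gated FLUX.1-Kontext-dev weights can be downloaded, and app.py is importable from the working directory; the output filename is arbitrary.

# Usage sketch (not part of the commit): call style_transfer() directly.
# Assumes HF_TOKEN is exported and a CUDA GPU is available.
from diffusers.utils import load_image

from app import style_transfer  # the module updated by this commit

# Sample image from the same dataset used in the Gradio examples.
url = "https://huggingface.co/datasets/black-forest-labs/kontext-bench/resolve/main/test/images/0003.jpg"
image = load_image(url)

# Defaults matching the UI: 24 steps, guidance 2.5, seed 42.
styled = style_transfer(image, "Ghibli", "", 24, 2.5, 42)
if styled is not None:
    styled.save("ghibli_styled.png")  # arbitrary output path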