ginipick committed
Commit 472d535 · verified · 1 Parent(s): 4ad74f6

Create app.py

Files changed (1):
  1. app.py +611 -0
app.py ADDED
@@ -0,0 +1,611 @@
import gradio as gr
import replicate
import os
from PIL import Image
import requests
from io import BytesIO
import time
import tempfile
import base64
import spaces
import torch
from diffusers.pipelines.wan.pipeline_wan_i2v import WanImageToVideoPipeline
from diffusers.models.transformers.transformer_wan import WanTransformer3DModel
from diffusers.utils.export_utils import export_to_video
import numpy as np
import random
import gc

# ===========================
# Configuration
# ===========================

# Set up Replicate API key (guarded: assigning None to os.environ when the
# variable is unset would raise a TypeError)
if os.getenv('REPLICATE_API_TOKEN'):
    os.environ['REPLICATE_API_TOKEN'] = os.getenv('REPLICATE_API_TOKEN')

# Video Model Configuration
VIDEO_MODEL_ID = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"
LANDSCAPE_WIDTH = 832
LANDSCAPE_HEIGHT = 480
MAX_SEED = np.iinfo(np.int32).max
FIXED_FPS = 16
MIN_FRAMES_MODEL = 8
MAX_FRAMES_MODEL = 81
MIN_DURATION = round(MIN_FRAMES_MODEL / FIXED_FPS, 1)
MAX_DURATION = round(MAX_FRAMES_MODEL / FIXED_FPS, 1)
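
# Duration/frame arithmetic (worked example; values follow directly from the
# constants above): at FIXED_FPS = 16, MIN_DURATION = round(8/16, 1) = 0.5 s
# and MAX_DURATION = round(81/16, 1) = 5.1 s. generate_video() later converts
# a requested duration back to a frame count with
#   num_frames = clip(round(duration_seconds * FIXED_FPS), 8, 81)
# so, for instance, a 3.5 s request yields 56 frames.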

default_prompt_i2v = "make this image come alive, cinematic motion, smooth animation"
default_negative_prompt = "static, still, no motion, frozen"

# ===========================
# Initialize Video Pipeline
# ===========================

# Initialize once on startup
video_pipe = None

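# Note (context, not part of the original commit): Wan2.2-I2V-A14B uses two
# expert transformers, which is why the pipeline below loads both `transformer`
# (high-noise, early denoising steps) and `transformer_2` (low-noise, late
# steps). The two guidance-scale sliders in the UI map to these two stages.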
def initialize_video_pipeline():
    global video_pipe
    if video_pipe is None:
        try:
            # Install PyTorch 2.8 (if needed)
            os.system('pip install --upgrade --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu126 "torch<2.9" spaces')

            video_pipe = WanImageToVideoPipeline.from_pretrained(
                VIDEO_MODEL_ID,
                transformer=WanTransformer3DModel.from_pretrained(
                    'cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
                    subfolder='transformer',
                    torch_dtype=torch.bfloat16,
                    device_map='cuda',
                ),
                transformer_2=WanTransformer3DModel.from_pretrained(
                    'cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
                    subfolder='transformer_2',
                    torch_dtype=torch.bfloat16,
                    device_map='cuda',
                ),
                torch_dtype=torch.bfloat16,
            ).to('cuda')

            # Clear memory
            for _ in range(3):
                gc.collect()
                torch.cuda.synchronize()
                torch.cuda.empty_cache()

            print("Video pipeline initialized successfully!")
        except Exception as e:
            print(f"Error initializing video pipeline: {e}")
            video_pipe = None

# ===========================
# Image Processing Functions
# ===========================

def upload_image_to_hosting(image):
    """Upload image to multiple hosting services with fallback"""
    # Method 1: Try imgbb.com
    try:
        buffered = BytesIO()
        image.save(buffered, format="PNG")
        buffered.seek(0)
        img_base64 = base64.b64encode(buffered.getvalue()).decode()

        response = requests.post(
            "https://api.imgbb.com/1/upload",
            data={
                'key': '6d207e02198a847aa98d0a2a901485a5',
                'image': img_base64,
            }
        )

        if response.status_code == 200:
            data = response.json()
            if data.get('success'):
                return data['data']['url']
    except Exception:
        pass

    # Method 2: Try 0x0.st
    try:
        buffered = BytesIO()
        image.save(buffered, format="PNG")
        buffered.seek(0)

        files = {'file': ('image.png', buffered, 'image/png')}
        response = requests.post("https://0x0.st", files=files)

        if response.status_code == 200:
            return response.text.strip()
    except Exception:
        pass

    # Method 3: Fallback to base64
    buffered = BytesIO()
    image.save(buffered, format="PNG")
    buffered.seek(0)
    img_base64 = base64.b64encode(buffered.getvalue()).decode()
    return f"data:image/png;base64,{img_base64}"
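
# Usage sketch (hypothetical, for a quick standalone check outside the app;
# "test.png" is a placeholder path):
#   url = upload_image_to_hosting(Image.open("test.png"))
#   print(url)  # an https:// URL on success, or a data:image/png;base64 fallback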

def process_images(prompt, image1, image2=None):
    """Process uploaded images with Replicate API"""
    if not image1:
        return None, "Please upload at least one image", None

    if not os.getenv('REPLICATE_API_TOKEN'):
        return None, "Please set REPLICATE_API_TOKEN", None

    try:
        image_urls = []

        # Upload images
        url1 = upload_image_to_hosting(image1)
        image_urls.append(url1)

        if image2:
            url2 = upload_image_to_hosting(image2)
            image_urls.append(url2)

        # Run the model
        output = replicate.run(
            "google/nano-banana",
            input={
                "prompt": prompt,
                "image_input": image_urls
            }
        )

        if output is None:
            return None, "No output received", None

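        # Depending on the replicate client version and the model, `output`
        # may be a file-like object, an object exposing a URL, a plain URL
        # string, or a list of URLs; the branches below try each shape in turn.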
        # Get the generated image
        img = None

        try:
            if hasattr(output, 'read'):
                img_data = output.read()
                img = Image.open(BytesIO(img_data))
        except Exception:
            pass

        if img is None:
            try:
                if hasattr(output, 'url'):
                    # `url` may be a property (str) or a method depending on
                    # the client version; handle both
                    url_attr = output.url
                    output_url = url_attr() if callable(url_attr) else url_attr
                    response = requests.get(output_url, timeout=30)
                    if response.status_code == 200:
                        img = Image.open(BytesIO(response.content))
            except Exception:
                pass

        if img is None:
            output_url = None
            if isinstance(output, str):
                output_url = output
            elif isinstance(output, list) and len(output) > 0:
                output_url = output[0]

            if output_url:
                response = requests.get(output_url, timeout=30)
                if response.status_code == 200:
                    img = Image.open(BytesIO(response.content))

        if img:
            return img, "✨ Image generated successfully! You can now generate a video from this image.", img
        else:
            return None, "Could not process output", None

    except Exception as e:
        return None, f"Error: {str(e)[:100]}", None

# ===========================
# Video Generation Functions
# ===========================

def resize_image_for_video(image: Image.Image) -> Image.Image:
    """Resize image for video generation; portrait images are rotated to
    landscape, resized, then rotated back."""
    if image.height > image.width:
        transposed = image.transpose(Image.Transpose.ROTATE_90)
        resized = resize_image_landscape(transposed)
        return resized.transpose(Image.Transpose.ROTATE_270)
    return resize_image_landscape(image)

def resize_image_landscape(image: Image.Image) -> Image.Image:
    """Center-crop to the target aspect ratio, then resize to target size"""
    target_aspect = LANDSCAPE_WIDTH / LANDSCAPE_HEIGHT
    width, height = image.size
    in_aspect = width / height

    if in_aspect > target_aspect:
        new_width = round(height * target_aspect)
        left = (width - new_width) // 2
        image = image.crop((left, 0, left + new_width, height))
    else:
        new_height = round(width / target_aspect)
        top = (height - new_height) // 2
        image = image.crop((0, top, width, top + new_height))

    return image.resize((LANDSCAPE_WIDTH, LANDSCAPE_HEIGHT), Image.LANCZOS)
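
# Worked example of the crop math above: a 1920x1080 input has aspect 1.778,
# wider than the 832/480 = 1.733 target, so it is center-cropped to
# round(1080 * 832/480) = 1872 px wide (left offset 24) and then resized
# to 832x480.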

@spaces.GPU(duration=120)
def generate_video(
    input_image,
    prompt,
    steps=4,
    negative_prompt=default_negative_prompt,
    duration_seconds=MAX_DURATION,
    guidance_scale=1,
    guidance_scale_2=1,
    seed=42,
    randomize_seed=False,
    progress=gr.Progress(track_tqdm=True),
):
    """Generate a video from an input image"""
    if input_image is None:
        raise gr.Error("Please generate or upload an image first.")

    # Initialize pipeline if needed
    initialize_video_pipeline()

    if video_pipe is None:
        raise gr.Error("Video pipeline not initialized. Please check GPU availability.")

    num_frames = np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    resized_image = resize_image_for_video(input_image)

    output_frames_list = video_pipe(
        image=resized_image,
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=resized_image.height,
        width=resized_image.width,
        num_frames=num_frames,
        guidance_scale=float(guidance_scale),
        guidance_scale_2=float(guidance_scale_2),
        num_inference_steps=int(steps),
        generator=torch.Generator(device="cuda").manual_seed(current_seed),
    ).frames[0]

    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
        video_path = tmpfile.name

    export_to_video(output_frames_list, video_path, fps=FIXED_FPS)

    return video_path, current_seed, "🎬 Video generated successfully!"
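
# Example call (hypothetical, outside the Gradio UI; needs a GPU session and a
# placeholder "photo.png"):
#   path, used_seed, msg = generate_video(Image.open("photo.png"),
#                                         default_prompt_i2v,
#                                         steps=4, duration_seconds=2.0)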

# ===========================
# Enhanced CSS
# ===========================

css = """
.gradio-container {
    background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
    font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif;
    min-height: 100vh;
}
.header-container {
    background: linear-gradient(135deg, #ffd93d 0%, #ffb347 100%);
    padding: 2.5rem;
    border-radius: 24px;
    margin-bottom: 2.5rem;
    box-shadow: 0 20px 60px rgba(255, 179, 71, 0.25);
}
.logo-text {
    font-size: 3.5rem;
    font-weight: 900;
    color: #2d3436;
    text-align: center;
    margin: 0;
    letter-spacing: -2px;
}
.subtitle {
    color: #2d3436;
    text-align: center;
    font-size: 1.2rem;
    margin-top: 0.5rem;
    opacity: 0.9;
    font-weight: 600;
}
.main-content {
    background: rgba(255, 255, 255, 0.95);
    backdrop-filter: blur(20px);
    border-radius: 24px;
    padding: 2.5rem;
    box-shadow: 0 10px 40px rgba(0, 0, 0, 0.08);
    margin-bottom: 2rem;
}
.gr-button-primary {
    background: linear-gradient(135deg, #ffd93d 0%, #ffb347 100%) !important;
    border: none !important;
    color: #2d3436 !important;
    font-weight: 700 !important;
    font-size: 1.1rem !important;
    padding: 1.2rem 2rem !important;
    border-radius: 14px !important;
    transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1) !important;
    text-transform: uppercase;
    letter-spacing: 1px;
    width: 100%;
    margin-top: 1rem !important;
}
.gr-button-primary:hover {
    transform: translateY(-3px) !important;
    box-shadow: 0 15px 40px rgba(255, 179, 71, 0.35) !important;
}
.gr-button-secondary {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
    border: none !important;
    color: white !important;
    font-weight: 700 !important;
    font-size: 1.1rem !important;
    padding: 1.2rem 2rem !important;
    border-radius: 14px !important;
    transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1) !important;
    text-transform: uppercase;
    letter-spacing: 1px;
    width: 100%;
    margin-top: 1rem !important;
}
.gr-button-secondary:hover {
    transform: translateY(-3px) !important;
    box-shadow: 0 15px 40px rgba(102, 126, 234, 0.35) !important;
}
.section-title {
    font-size: 1.8rem;
    font-weight: 800;
    color: #2d3436;
    margin-bottom: 1rem;
    padding-bottom: 0.5rem;
    border-bottom: 3px solid #ffd93d;
}
.status-text {
    font-family: 'SF Mono', 'Monaco', monospace;
    color: #00b894;
    font-size: 0.9rem;
}
.image-container {
    border-radius: 14px !important;
    overflow: hidden;
    border: 2px solid #e1e8ed !important;
    background: #fafbfc !important;
}
footer {
    display: none !important;
}
"""

# ===========================
# Gradio Interface
# ===========================

with gr.Blocks(css=css, theme=gr.themes.Base()) as demo:
    # Shared state for passing the image between tabs
    generated_image_state = gr.State(None)

    with gr.Column(elem_classes="header-container"):
        gr.HTML("""
        <h1 class="logo-text">🍌 Open Nano Banana + Video</h1>
        <p class="subtitle">AI-Powered Image Style Transfer with Video Generation</p>
        <div style="display: flex; justify-content: center; align-items: center; gap: 10px; margin-top: 20px;">
            <a href="https://huggingface.co/spaces/openfree/Nano-Banana-Upscale" target="_blank">
                <img src="https://img.shields.io/static/v1?label=NANO%20BANANA&message=UPSCALE&color=%230000ff&labelColor=%23800080&logo=GOOGLE&logoColor=white&style=for-the-badge" alt="Nano Banana Upscale">
            </a>
            <a href="https://discord.gg/openfreeai" target="_blank">
                <img src="https://img.shields.io/static/v1?label=Discord&message=Openfree%20AI&color=%230000ff&labelColor=%23800080&logo=discord&logoColor=white&style=for-the-badge" alt="Discord Openfree AI">
            </a>
        </div>
        """)

    with gr.Tabs():
        # Tab 1: Image Generation
        with gr.TabItem("🎨 Step 1: Generate Image"):
            with gr.Column(elem_classes="main-content"):
                gr.HTML('<h2 class="section-title">🎨 Image Style Transfer</h2>')

                with gr.Row(equal_height=True):
                    with gr.Column(scale=1):
                        style_prompt = gr.Textbox(
                            label="Style Description",
                            placeholder="Describe your style...",
                            lines=3,
                            value="Make the sheets in the style of the logo. Make the scene natural.",
                        )

                        with gr.Row(equal_height=True):
                            image1 = gr.Image(
                                label="Primary Image",
                                type="pil",
                                height=200,
                                elem_classes="image-container"
                            )
                            image2 = gr.Image(
                                label="Secondary Image (Optional)",
                                type="pil",
                                height=200,
                                elem_classes="image-container"
                            )

                        generate_img_btn = gr.Button(
                            "Generate Image ✨",
                            variant="primary",
                            size="lg"
                        )

                    with gr.Column(scale=1):
                        output_image = gr.Image(
                            label="Generated Result",
                            type="pil",
                            height=420,
                            elem_classes="image-container"
                        )

                        img_status = gr.Textbox(
                            label="Status",
                            interactive=False,
                            lines=1,
                            elem_classes="status-text",
                            value="Ready to generate image..."
                        )

                        send_to_video_btn = gr.Button(
                            "Send to Video Generation →",
                            variant="secondary",
                            size="lg",
                            visible=False
                        )

        # Tab 2: Video Generation
        with gr.TabItem("🎬 Step 2: Generate Video"):
            with gr.Column(elem_classes="main-content"):
                gr.HTML('<h2 class="section-title">🎬 Video Generation from Image</h2>')

                with gr.Row():
                    with gr.Column():
                        video_input_image = gr.Image(
                            type="pil",
                            label="Input Image (from Step 1 or upload new)",
                            elem_classes="image-container"
                        )
                        video_prompt = gr.Textbox(
                            label="Animation Prompt",
                            value=default_prompt_i2v,
                            lines=3
                        )
                        duration_input = gr.Slider(
                            minimum=MIN_DURATION,
                            maximum=MAX_DURATION,
                            step=0.1,
                            value=3.5,
                            label="Duration (seconds)",
                            info=f"Clamped to {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps"
                        )

                        with gr.Accordion("Advanced Settings", open=False):
                            video_negative_prompt = gr.Textbox(
                                label="Negative Prompt",
                                value=default_negative_prompt,
                                lines=3
                            )
                            video_seed = gr.Slider(
                                label="Seed",
                                minimum=0,
                                maximum=MAX_SEED,
                                step=1,
                                value=42
                            )
                            randomize_seed = gr.Checkbox(
                                label="Randomize seed",
                                value=True
                            )
                            steps_slider = gr.Slider(
                                minimum=1,
                                maximum=30,
                                step=1,
                                value=6,
                                label="Inference Steps"
                            )
                            guidance_1 = gr.Slider(
                                minimum=0.0,
                                maximum=10.0,
                                step=0.5,
                                value=1,
                                label="Guidance Scale - High Noise"
                            )
                            guidance_2 = gr.Slider(
                                minimum=0.0,
                                maximum=10.0,
                                step=0.5,
                                value=1,
                                label="Guidance Scale - Low Noise"
                            )

                        generate_video_btn = gr.Button(
                            "Generate Video 🎬",
                            variant="primary",
                            size="lg"
                        )

                    with gr.Column():
                        video_output = gr.Video(
                            label="Generated Video",
                            autoplay=True
                        )
                        video_status = gr.Textbox(
                            label="Status",
                            interactive=False,
                            lines=1,
                            elem_classes="status-text",
                            value="Ready to generate video..."
                        )

    # Event Handlers
    def on_image_generated(prompt, img1, img2):
        img, status, state_img = process_images(prompt, img1, img2)
        if img:
            return img, status, state_img, gr.update(visible=True)
        return img, status, state_img, gr.update(visible=False)

    def send_image_to_video(img):
        if img:
            return img, "Image loaded! Ready to generate video."
        return None, "No image to send."

    # Image generation events
    generate_img_btn.click(
        fn=on_image_generated,
        inputs=[style_prompt, image1, image2],
        outputs=[output_image, img_status, generated_image_state, send_to_video_btn]
    )

    # Send to video tab
    send_to_video_btn.click(
        fn=send_image_to_video,
        inputs=[generated_image_state],
        outputs=[video_input_image, video_status]
    )

    # Video generation events (positional order must match generate_video's
    # signature: image, prompt, steps, negative_prompt, duration,
    # guidance_scale, guidance_scale_2, seed, randomize_seed)
    video_inputs = [
        video_input_image, video_prompt, steps_slider,
        video_negative_prompt, duration_input,
        guidance_1, guidance_2, video_seed, randomize_seed
    ]

    def generate_video_wrapper(*args):
        try:
            video_path, seed, status = generate_video(*args)
            return video_path, seed, status
        except Exception as e:
            # args[7] is the seed value, echoed back unchanged on failure
            return None, args[7], f"Error: {str(e)}"

    generate_video_btn.click(
        fn=generate_video_wrapper,
        inputs=video_inputs,
        outputs=[video_output, video_seed, video_status]
    )

    # Examples for image generation
    gr.Examples(
        examples=[
            ["Create a dreamy watercolor style with soft pastels", "examples/photo1.jpg", None],
            ["Transform into cyberpunk neon aesthetic", "examples/photo2.jpg", "examples/style.jpg"],
            ["Make it look like Studio Ghibli animation", "examples/landscape.jpg", None],
        ],
        inputs=[style_prompt, image1, image2],
        outputs=[output_image, img_status],
        fn=process_images,
        cache_examples=False
    )

# Launch
if __name__ == "__main__":
    # Try to initialize the video pipeline on startup
    try:
        initialize_video_pipeline()
    except Exception:
        print("Video pipeline initialization deferred to first use")

    demo.launch(
        share=True,
        server_name="0.0.0.0",
        server_port=7860
    )