rajux75 committed
Commit ad2e240 · verified · 1 Parent(s): 5b29860

Update app.py

Files changed (1): app.py +243 -89
app.py CHANGED
@@ -1,16 +1,69 @@
 import gradio as gr
 import time
-import random # To simulate variations like seed
-
-# --- Placeholder Function (Will Contain Your AI Logic Later) ---
 def process_prompt_and_generate(user_prompt, image_resolution, guidance_scale, seed, animation_frames, animation_style):
     """
-    This function simulates the AI pipeline. It accepts more parameters now.
     It yields updates for the status and logs.
     """
     logs = []
     status = "Starting processing..."
-    yield user_prompt, "", None, None, "", "", "", "", "", "\n".join(logs), status # Initial state
 
     if not user_prompt:
         logs.append("Error: No prompt provided.")
@@ -18,64 +71,160 @@ def process_prompt_and_generate(user_prompt, image_resolution, guidance_scale, s
         yield user_prompt, "", None, None, "", "", "", "", "", "\n".join(logs), status
         return
 
-    # --- Simulate Prompt Enhancement ---
-    status = "Enhancing prompt..."
     logs.append(f"User Prompt: '{user_prompt}'")
-    logs.append(f"Parameters: Resolution={image_resolution}, Guidance Scale={guidance_scale}, Seed={seed}, Frames={animation_frames}, Style={animation_style}")
-    yield user_prompt, "", None, None, "", "", "", "", "", "\n".join(logs), status
-    time.sleep(1.5) # Simulate time
-
-    enhanced_prompt = f"Stunning, highly detailed, {animation_style.lower()} animation frame of {user_prompt}, cinematic lighting, high focus, 8k, trending on Artstation. Seed: {seed}" # Dummy enhancement
-    logs.append(f"Enhanced Prompt: '{enhanced_prompt}'")
-    yield user_prompt, enhanced_prompt, None, None, "", "", "", "", "", "\n".join(logs), status
-    time.sleep(1) # Simulate time
-
-    # --- Simulate Image Generation ---
-    status = "Generating image..."
-    logs.append(f"Generating initial image ({image_resolution}px)...")
-    yield user_prompt, enhanced_prompt, None, None, "", "", "", "", "", "\n".join(logs), status
-    time.sleep(3) # Simulate time
-
-    # In a real app, this would call a model and return an image path/object
-    dummy_image_path = "https://gradio-app.github.io/assets/tower.jpg" # Example image
-    logs.append(f"Image generated successfully.")
-    yield user_prompt, enhanced_prompt, dummy_image_path, None, "", "", "", "", "", "\n".join(logs), status
-    time.sleep(1)
-
-    # --- Simulate Animation ---
-    status = "Generating animation..."
-    logs.append(f"Generating animation ({animation_frames} frames, style: {animation_style})...")
-    yield user_prompt, enhanced_prompt, dummy_image_path, None, "", "", "", "", "", "\n".join(logs), status
-    time.sleep(4) # Simulate time
-
-    # In a real app, this would call an animation model and return a video path/object
-    dummy_video_path = "https://gradio-app.github.io/assets/video-demo.mp4" # Example video
-    logs.append(f"Animation generated successfully.")
-    yield user_prompt, enhanced_prompt, dummy_image_path, dummy_video_path, "", "", "", "", "", "\n".join(logs), status
-    time.sleep(1)
 
     # --- Finalizing Outputs ---
     status = "Process complete!"
     logs.append("All steps finished.")
 
-    # Prepare metadata string for display
-    metadata = f"Resolution: {image_resolution}px\nGuidance Scale: {guidance_scale}\nSeed: {seed}\nFrames: {animation_frames}\nStyle: {animation_style}"
-
-
-    # The yield statements above sent partial updates. The final return sends the complete data.
-    # However, with generator functions (`yield`), the final result is implicitly what was yielded last.
-    # To ensure all outputs are correctly populated at the end, it's often better to have
-    # the function return the final state once processing is completely done.
-    # Let's refactor slightly to use a single return at the end for simplicity with Gradio's output mapping.
-    # (Alternative: Use gr.Progress and yield intermediary results explicitly)
-
-    # Reset logs for the final return state (or append "Complete")
-    logs.append("Final output ready.")
-
-    return (user_prompt, enhanced_prompt, dummy_image_path, dummy_video_path,
-            str(image_resolution), str(guidance_scale), str(seed), str(animation_frames), animation_style,
-            "\n".join(logs), status)
-
 
 # --- Gradio UI Definition ---
 
@@ -89,6 +238,13 @@ theme = gr.themes.Monochrome().set(
     # spacing_size_lg="2rem" # Example spacing adjustment
 )
 
 
 with gr.Blocks(theme=theme, title="AI Creative Studio") as demo:
 
@@ -109,6 +265,8 @@ with gr.Blocks(theme=theme, title="AI Creative Studio") as demo:
         # 🎨 Multi-Step AI Creative Pipeline 🚀
         Unleash your imagination! Input a prompt, and our AI orchestrates a sequence:
         Prompt Enhancement → Image Generation → Animation.
         """
     )
     gr.Markdown("---") # Separator
@@ -169,8 +327,8 @@ with gr.Blocks(theme=theme, title="AI Creative Studio") as demo:
 
     # Row for media
     with gr.Row():
-        generated_image_output = gr.Image(label="Generated Image", interactive=False, height=450, show_share_button=True)
-        generated_animation_output = gr.Video(label="Generated Animation", interactive=False, height=450, show_share_button=True)
 
 
     # Display Parameters Used (Collapsed or in a smaller section)
@@ -178,16 +336,17 @@ with gr.Blocks(theme=theme, title="AI Creative Studio") as demo:
     parameters_used_output = gr.Textbox(
         label="Generation Parameters",
         interactive=False,
-        lines=5,
         show_copy_button=True
     )
     # Dummy output components to catch the individual parameters
     # We will combine them in the process_prompt_and_generate function for the Textbox above
-    res_out = gr.Textbox(visible=False)
-    gs_out = gr.Textbox(visible=False)
-    seed_out = gr.Textbox(visible=False)
-    frames_out = gr.Textbox(visible=False)
-    style_out = gr.Textbox(visible=False)
 
 
     # Download Buttons (Placeholder)
@@ -195,8 +354,9 @@ with gr.Blocks(theme=theme, title="AI Creative Studio") as demo:
     with gr.Row():
         # These buttons are just placeholders for now.
         # Real download logic needs separate functions.
-        download_image_button = gr.Button("⬇️ Download Image", interactive=False) # Make interactive=True when download logic is added
-        download_video_button = gr.Button("⬇️ Download Video", interactive=False) # Make interactive=True when download logic is added
 
     gr.Markdown("---") # Separator
 
@@ -216,6 +376,8 @@ with gr.Blocks(theme=theme, title="AI Creative Studio") as demo:
 
     # Button click triggers the main processing function
     # The outputs list maps the function's return values to UI components
     generate_button.click(
         fn=process_prompt_and_generate,
         inputs=[
@@ -238,32 +400,16 @@ with gr.Blocks(theme=theme, title="AI Creative Studio") as demo:
             style_out,
             logs_output, # Logs are updated incrementally/finally
             status_display # Status is updated incrementally/finally
-        ]
-    )
-
-    # Update the combined parameters display after the main function runs
-    # This uses the individual output components as inputs for a new function
-    def update_parameters_display(res, gs, seed, frames, style):
-        if not res: # Check if results exist
-            return ""
-        metadata = f"Resolution: {res}px\nGuidance Scale: {gs}\nSeed: {seed}\nFrames: {frames}\nStyle: {style}"
-        return metadata
-
-    # Add a state component to hold the output parameters temporarily
-    # Or simply chain the update after the main click, using the same outputs
-    # Let's chain it for simplicity here. The order matters.
-    # This click event fires AFTER the main click event completes and updates its outputs.
-    # It takes the updated outputs as its inputs.
-    generate_button.click(
         fn=update_parameters_display,
         inputs=[res_out, gs_out, seed_out, frames_out, style_out],
         outputs=[parameters_used_output]
     )
 
-    # Randomize Seed Button Interaction
-    def randomize():
-        return random.randint(1, 1000000) # Generate a random seed
 
     randomize_seed_button.click(
         fn=randomize,
         inputs=[],
@@ -274,5 +420,13 @@ with gr.Blocks(theme=theme, title="AI Creative Studio") as demo:
 # --- Launch the App ---
 if __name__ == "__main__":
     print("Gradio AI Creative Studio is starting...")
-    demo.launch()
-    print("App launched!")
 import gradio as gr
 import time
+import random
+import torch
+import numpy as np
+from PIL import Image
+import imageio # For saving video
+import tempfile # For creating temporary files
+import os
+
+# --- Hugging Face Model Imports ---
+from transformers import T5ForConditionalGeneration, T5Tokenizer
+from diffusers import StableDiffusionPipeline, AnimateDiffPipeline, DDIMScheduler, MotionAdapter
+
+# --- Model Loading (Load outside the function for better performance) ---
+# Check for CUDA availability
+device = "cuda" if torch.cuda.is_available() else "cpu"
+print(f"Using device: {device}")
+
+# Load Prompt Enhancement Model
+print("Loading Prompt Enhancement Model (T5)...")
+tokenizer_t5 = T5Tokenizer.from_pretrained("t5-small")
+model_t5 = T5ForConditionalGeneration.from_pretrained("t5-small").to(device)
+print("T5 model loaded.")
+
+# Load Image Generation Model
+print("Loading Image Generation Model (Stable Diffusion 1.5)...")
+pipe_sd = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16 if device == "cuda" else torch.float32).to(device)
+# Optional: Enable optimizations if using CUDA (xformers attention, VAE slicing).
+# For larger models such as SDXL, enable_model_cpu_offload() can also help with memory.
+if device == "cuda":
+    pipe_sd.enable_xformers_memory_efficient_attention()
+    pipe_sd.enable_vae_slicing()
+
+print("Stable Diffusion 1.5 model loaded.")
+
+
+# Load Animation Model (AnimateDiff)
+print("Loading Animation Model (AnimateDiff)...")
+# Load motion module
+adapter = MotionAdapter.from_pretrained("emperorxi/animatediff-motion-module-v1", torch_dtype=torch.float16 if device == "cuda" else torch.float32)
+# Build the animation pipeline on the base SD weights. AnimateDiffPipeline (imported above)
+# is the class that accepts a motion_adapter; plain StableDiffusionPipeline does not.
+pipe_anim = AnimateDiffPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", motion_adapter=adapter, torch_dtype=torch.float16 if device == "cuda" else torch.float32).to(device)
+# Configure scheduler
+pipe_anim.scheduler = DDIMScheduler.from_config(pipe_anim.scheduler.config, clip_sample=False, timestep_spacing="uniform")
+# Optional: Enable optimizations if using CUDA
+if device == "cuda":
+    pipe_anim.enable_xformers_memory_efficient_attention()
+    pipe_anim.enable_vae_slicing()
+    # pipe_anim.enable_model_cpu_offload() # Can be useful for memory, but slower if components are moved back and forth
+
+print("AnimateDiff model loaded.")
+
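A caveat for the optimization calls above: enable_xformers_memory_efficient_attention() raises when the optional xformers package is not installed. A minimal guard, sketched here with a hypothetical try_enable_xformers helper that is not part of this commit, keeps startup alive without it:

    def try_enable_xformers(pipe):
        # Hypothetical helper: enable xformers attention only when the package is usable.
        try:
            pipe.enable_xformers_memory_efficient_attention()
        except Exception as err:
            print(f"xformers unavailable, using default attention: {err}")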
+# --- Function to run the pipeline ---
 def process_prompt_and_generate(user_prompt, image_resolution, guidance_scale, seed, animation_frames, animation_style):
     """
+    Runs the AI pipeline using Hugging Face models.
     It yields updates for the status and logs.
     """
     logs = []
     status = "Starting processing..."
+    # Yield initial state - Gradio expects all outputs to be present, even if empty
+    yield user_prompt, "", None, None, "", "", "", "", "", "\n".join(logs), status
 
     if not user_prompt:
         logs.append("Error: No prompt provided.")
         yield user_prompt, "", None, None, "", "", "", "", "", "\n".join(logs), status
         return
 
+    # Ensure seed is a positive integer, use random if -1
+    current_seed = seed if seed != -1 else random.randint(0, 100000000)
+    generator = torch.Generator(device=device).manual_seed(current_seed)
+    np.random.seed(current_seed) # Seed numpy too for any potential numpy randomness
+
+    # --- Step 1: Prompt Enhancement (using T5) ---
+    status = "Enhancing prompt (T5)..."
     logs.append(f"User Prompt: '{user_prompt}'")
+    logs.append(f"Parameters: Resolution={image_resolution}, Guidance Scale={guidance_scale}, Seed={current_seed}, Frames={animation_frames}, Style={animation_style}")
+    yield user_prompt, "", None, None, str(image_resolution), str(guidance_scale), str(current_seed), str(animation_frames), animation_style, "\n".join(logs), status # Update parameters display early
+    start_time = time.time()
+
+    try:
+        input_text = f"enhance prompt: {user_prompt}" # task prefix; note t5-small was not trained on an "enhance prompt" task, so output quality will vary
+        input_ids = tokenizer_t5(input_text, return_tensors="pt").input_ids.to(device)
+        outputs = model_t5.generate(input_ids, max_length=64, num_beams=4, early_stopping=True) # Keep enhancement concise
+        enhanced_prompt = tokenizer_t5.decode(outputs[0], skip_special_tokens=True)
+        logs.append(f"Enhanced Prompt: '{enhanced_prompt}'")
+        yield user_prompt, enhanced_prompt, None, None, str(image_resolution), str(guidance_scale), str(current_seed), str(animation_frames), animation_style, "\n".join(logs), status
+    except Exception as e:
+        logs.append(f"Error during prompt enhancement: {e}")
+        status = "Error during prompt enhancement."
+        yield user_prompt, "", None, None, str(image_resolution), str(guidance_scale), str(current_seed), str(animation_frames), animation_style, "\n".join(logs), status
+        return
+    end_time = time.time()
+    logs.append(f"Prompt enhancement took {end_time - start_time:.2f} seconds.")
+
+    # --- Step 2: Image Generation (using Stable Diffusion) ---
+    status = "Generating image (Stable Diffusion)..."
+    logs.append(f"Generating initial image ({image_resolution}x{image_resolution}px)...")
+    yield user_prompt, enhanced_prompt, None, None, str(image_resolution), str(guidance_scale), str(current_seed), str(animation_frames), animation_style, "\n".join(logs), status
+    start_time = time.time()
+
+    try:
+        # Generate the image
+        with torch.no_grad():
+            image = pipe_sd(
+                prompt=enhanced_prompt,
+                height=image_resolution,
+                width=image_resolution,
+                guidance_scale=guidance_scale,
+                generator=generator
+            ).images[0]
+
+        # Save the image to a temporary file. Gradio can handle PIL images directly,
+        # but a tempfile path is robust and matches the type="filepath" components below.
+        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmpfile:
+            temp_image_path = tmpfile.name
+            image.save(temp_image_path)
+
+        logs.append(f"Image generated successfully: {temp_image_path}")
+        yield user_prompt, enhanced_prompt, temp_image_path, None, str(image_resolution), str(guidance_scale), str(current_seed), str(animation_frames), animation_style, "\n".join(logs), status
+    except Exception as e:
+        logs.append(f"Error during image generation: {e}")
+        status = "Error during image generation."
+        yield user_prompt, enhanced_prompt, None, None, str(image_resolution), str(guidance_scale), str(current_seed), str(animation_frames), animation_style, "\n".join(logs), status
+        # Clean up temp file if it exists from a partial save
+        if 'temp_image_path' in locals() and os.path.exists(temp_image_path):
+            os.remove(temp_image_path)
+        return
+    end_time = time.time()
+    logs.append(f"Image generation took {end_time - start_time:.2f} seconds.")
+
+
+    # --- Step 3: Animation (using AnimateDiff) ---
+    status = "Generating animation (AnimateDiff)..."
+    logs.append(f"Generating animation ({animation_frames} frames, style: {animation_style}). Note: the 'Style' parameter doesn't directly control AnimateDiff output...")
+    yield user_prompt, enhanced_prompt, temp_image_path, None, str(image_resolution), str(guidance_scale), str(current_seed), str(animation_frames), animation_style, "\n".join(logs), status
+    start_time = time.time()
+
+    try:
+        # AnimateDiff is primarily text-to-video: it takes the enhanced prompt and
+        # generates a frame sequence. The 'style' parameter has no direct equivalent.
+        # Most Stable Diffusion arguments (guidance_scale, generator) are supported;
+        # width/height are left at the pipeline default because the motion adapter
+        # expects specific resolutions.
+        with torch.no_grad():
+            animation_output = pipe_anim(
+                prompt=enhanced_prompt,
+                negative_prompt=None, # Could add a negative prompt if needed
+                num_frames=animation_frames,
+                guidance_scale=guidance_scale,
+                generator=generator,
+                # width=image_resolution,  # commented out for compatibility, using default
+                # height=image_resolution,
+            )
+        animation_frames_list = animation_output.frames[0] # first batch entry: a list of PIL frames
+
+        # Compile frames into a video
+        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
+            temp_video_path = tmpfile.name
+            # Use imageio to write the video - requires an ffmpeg backend
+            try:
+                imageio.mimwrite(temp_video_path, [np.asarray(frame) for frame in animation_frames_list], fps=8, quality=8) # PIL frames -> uint8 arrays; adjust fps/quality as needed
+            except Exception as ffmpeg_error:
+                logs.append(f"Error saving video with imageio/ffmpeg: {ffmpeg_error}")
+                logs.append("Ensure ffmpeg is installed and in your PATH, or use imageio.get_writer with a specific backend.")
+                status = "Error saving video."
+                # Attempt cleanup
+                if os.path.exists(temp_video_path):
+                    os.remove(temp_video_path)
+                yield user_prompt, enhanced_prompt, temp_image_path, None, str(image_resolution), str(guidance_scale), str(current_seed), str(animation_frames), animation_style, "\n".join(logs), status
+                # Clean up temp image
+                if 'temp_image_path' in locals() and os.path.exists(temp_image_path):
+                    os.remove(temp_image_path)
+                return
+
+        logs.append(f"Animation generated successfully: {temp_video_path}")
+        yield user_prompt, enhanced_prompt, temp_image_path, temp_video_path, str(image_resolution), str(guidance_scale), str(current_seed), str(animation_frames), animation_style, "\n".join(logs), status
+    except Exception as e:
+        logs.append(f"Error during animation generation: {e}")
+        status = "Error during animation generation."
+        yield user_prompt, enhanced_prompt, temp_image_path, None, str(image_resolution), str(guidance_scale), str(current_seed), str(animation_frames), animation_style, "\n".join(logs), status
+        # Clean up temp files
+        if 'temp_image_path' in locals() and os.path.exists(temp_image_path):
+            os.remove(temp_image_path)
+        if 'temp_video_path' in locals() and os.path.exists(temp_video_path):
+            os.remove(temp_video_path)
+        return
+    end_time = time.time()
+    logs.append(f"Animation generation took {end_time - start_time:.2f} seconds.")
 
     # --- Finalizing Outputs ---
     status = "Process complete!"
     logs.append("All steps finished.")
 
+    # In a generator function, the last yield provides the final values Gradio keeps,
+    # so make it explicitly contain the complete final state.
+    yield user_prompt, enhanced_prompt, temp_image_path, temp_video_path, \
+        str(image_resolution), str(guidance_scale), str(current_seed), str(animation_frames), animation_style, \
+        "\n".join(logs), status
+
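The imageio.mimwrite call above depends on imageio locating an ffmpeg backend at runtime. A quick preflight check, assuming the imageio-ffmpeg companion package is installed, surfaces a missing binary before a long generation run:

    import imageio_ffmpeg

    # get_ffmpeg_exe() raises RuntimeError if no usable ffmpeg binary can be found
    print("ffmpeg binary:", imageio_ffmpeg.get_ffmpeg_exe())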
+# --- Function to update the parameters display (called after main function) ---
+def update_parameters_display(res, gs, seed, frames, style):
+    # This function remains the same, it just formats the strings passed from the main function
+    if not res: # Check if results exist (e.g., first yield is empty)
+        return ""
+    metadata = f"Resolution: {res}px\nGuidance Scale: {gs}\nSeed: {seed}\nFrames: {frames}\nStyle: {style}\n(Note: Animation Style may not directly control model output)" # Add note here too
+    return metadata
+
+# --- Function to randomize seed ---
+def randomize():
+    return random.randint(1, 100000000) # Generate a random seed
 
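For reference, the fixed-seed path in process_prompt_and_generate is what makes runs repeatable: the same manual seed drives torch.Generator to identical noise. A minimal, self-contained check of that property:

    import torch

    g1 = torch.Generator().manual_seed(42)
    g2 = torch.Generator().manual_seed(42)
    # identical seeds produce identical draws, so a fixed seed reproduces a generation
    assert torch.equal(torch.randn(4, generator=g1), torch.randn(4, generator=g2))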
 # --- Gradio UI Definition ---
 
 
     # spacing_size_lg="2rem" # Example spacing adjustment
 )
 
+# Use tempfile for a base temp directory managed by the app
+temp_dir = tempfile.mkdtemp()
+print(f"Using temporary directory: {temp_dir}")
+
+# Set Gradio's temp dir if needed (often handled automatically)
+# gr.processing_utils.TEMP_DIR = temp_dir # This might be needed in older Gradio versions or specific setups
+
 
 with gr.Blocks(theme=theme, title="AI Creative Studio") as demo:
 
 
         # 🎨 Multi-Step AI Creative Pipeline 🚀
         Unleash your imagination! Input a prompt, and our AI orchestrates a sequence:
         Prompt Enhancement → Image Generation → Animation.
+        **Using free models from Hugging Face (T5, Stable Diffusion 1.5, AnimateDiff).**
+        *Note: 'Animation Style' parameter might not directly control the AnimateDiff model output.*
         """
     )
     gr.Markdown("---") # Separator
 
     # Row for media
     with gr.Row():
+        generated_image_output = gr.Image(label="Generated Image", interactive=False, height=450, show_share_button=True, type="filepath") # Specify type="filepath"
+        generated_animation_output = gr.Video(label="Generated Animation", interactive=False, height=450, show_share_button=True) # gr.Video takes file paths by default and has no `type` parameter
 
 
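With type="filepath", the function wired to these components must return a path string rather than a PIL image, which is why the pipeline above saves to tempfiles. A self-contained sketch of that contract (make_image is a hypothetical stand-in):

    import tempfile
    import gradio as gr
    from PIL import Image

    def make_image():
        path = tempfile.NamedTemporaryFile(suffix=".png", delete=False).name
        Image.new("RGB", (64, 64), "navy").save(path)
        return path # a filepath string, matching gr.Image(type="filepath")

    demo_filepath = gr.Interface(fn=make_image, inputs=[], outputs=gr.Image(type="filepath"))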
     # Display Parameters Used (Collapsed or in a smaller section)
     parameters_used_output = gr.Textbox(
         label="Generation Parameters",
         interactive=False,
+        lines=6, # Increased lines slightly to fit the note
+        max_lines=30,
         show_copy_button=True
     )
     # Dummy output components to catch the individual parameters
     # We will combine them in the process_prompt_and_generate function for the Textbox above
+    # (gr.Textbox's `type` accepts only "text", "password", or "email", so it is omitted here)
+    res_out = gr.Textbox(visible=False)
+    gs_out = gr.Textbox(visible=False)
+    seed_out = gr.Textbox(visible=False)
+    frames_out = gr.Textbox(visible=False)
+    style_out = gr.Textbox(visible=False)
 
 
     # Download Buttons (Placeholder)
 
     with gr.Row():
         # These buttons are just placeholders for now.
         # Real download logic needs separate functions.
+        # Making them interactive=False as they don't have click events linked
+        download_image_button = gr.Button("⬇️ Download Image", interactive=False)
+        download_video_button = gr.Button("⬇️ Download Video", interactive=False)
 
     gr.Markdown("---") # Separator
 
 
 
     # Button click triggers the main processing function
     # The outputs list maps the function's return values to UI components
+    # Because process_prompt_and_generate is a generator, Gradio updates the outputs
+    # with each yielded value. The final yield provides the final state.
     generate_button.click(
         fn=process_prompt_and_generate,
         inputs=[
 
             style_out,
             logs_output, # Logs are updated incrementally/finally
             status_display # Status is updated incrementally/finally
+        ],
+        api_name="generate" # Optional: Add an API name for easy calling
+    ).success( # Chain the parameter update after the main process finishes successfully
         fn=update_parameters_display,
         inputs=[res_out, gs_out, seed_out, frames_out, style_out],
         outputs=[parameters_used_output]
     )
 
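The stream-then-chain pattern wired above is easier to see in isolation: each yield streams into one component, and .success fires only after the generator finishes without raising. A minimal sketch with hypothetical names, not part of this commit:

    import time
    import gradio as gr

    def worker(n):
        for i in range(int(n)):
            time.sleep(0.2)
            yield f"step {i + 1}/{int(n)}" # each yield updates progress_box; the last value persists

    with gr.Blocks() as mini_demo:
        n_in = gr.Number(value=3)
        progress_box = gr.Textbox()
        done_box = gr.Textbox()
        gr.Button("Run").click(fn=worker, inputs=n_in, outputs=progress_box).success(
            fn=lambda last: f"finished with: {last}", inputs=progress_box, outputs=done_box
        )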
 
+    # Randomize Seed Button Interaction
     randomize_seed_button.click(
         fn=randomize,
         inputs=[],
 
 # --- Launch the App ---
 if __name__ == "__main__":
     print("Gradio AI Creative Studio is starting...")
+    # Use share=True to make it accessible over the internet (for testing)
+    # Use inbrowser=True to auto-open the browser
+    demo.launch(inbrowser=True)
+    print("App launched!") # Note: launch() blocks, so this prints only after the server stops
+
+    # Optional: Clean up the temporary directory when the app stops
+    # This is not automatically called when you Ctrl+C, but useful in some deployment scenarios
+    # import shutil
+    # shutil.rmtree(temp_dir)
+    # print(f"Cleaned up temporary directory: {temp_dir}")
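Since the commented-out shutil.rmtree above never runs on Ctrl+C, an atexit hook is one way to make the temp_dir cleanup automatic; a sketch, not part of this commit:

    import atexit
    import shutil

    # atexit handlers run at interpreter shutdown, including after an unhandled KeyboardInterrupt
    atexit.register(lambda: shutil.rmtree(temp_dir, ignore_errors=True))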