Update app.py
app.py CHANGED
@@ -11,537 +11,14 @@ from PIL import Image
 import gradio_client.utils
 original_json_schema = gradio_client.utils._json_schema_to_python_type
 
-
-
-
-
-def patched_json_schema(schema, defs=None):
-    # If 'additionalProperties' is a boolean, replace it with a generic type
-    try:
-        if "additionalProperties" in schema and isinstance(schema["additionalProperties"], bool):
-            schema["additionalProperties"] = {"type": "any"}
-    except (TypeError, KeyError):
-        pass
-
-    # Attempt to parse normally; fallback to "any" on error
-    try:
-        return original_json_schema(schema, defs)
-    except Exception:
-        return "any"
-
-gradio_client.utils._json_schema_to_python_type = patched_json_schema
-# -----------------------------------------------------------------------------
-
-# ----------------------------- Model Loading ----------------------------------
-device = "cuda" if torch.cuda.is_available() else "cpu"
-repo_id = "black-forest-labs/FLUX.1-dev"
-adapter_id = "openfree/flux-chatgpt-ghibli-lora"
-
-def load_model_with_retry(max_retries=5):
-    for attempt in range(max_retries):
-        try:
-            print(f"Loading model attempt {attempt+1}/{max_retries}...")
-            pipeline = DiffusionPipeline.from_pretrained(
-                repo_id,
-                torch_dtype=torch.bfloat16,
-                use_safetensors=True,
-                resume_download=True
-            )
-            print("Base model loaded successfully, now loading LoRA weights...")
-            pipeline.load_lora_weights(adapter_id)
-            pipeline = pipeline.to(device)
-            print("Pipeline is ready!")
-            return pipeline
-        except Exception as e:
-            if attempt < max_retries - 1:
-                wait_time = 10 * (attempt + 1)
-                print(f"Error loading model: {e}. Retrying in {wait_time} seconds...")
-                import time
-                time.sleep(wait_time)
-            else:
-                raise Exception(f"Failed to load model after {max_retries} attempts: {e}")
-
-pipeline = load_model_with_retry()
-
-# ----------------------------- Inference Function -----------------------------
-MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 1024
-
-@spaces.GPU(duration=120)
-def inference(
-    prompt: str,
-    seed: int,
-    randomize_seed: bool,
-    width: int,
-    height: int,
-    guidance_scale: float,
-    num_inference_steps: int,
-    lora_scale: float,
-):
-    # If "randomize_seed" is selected, choose a random seed
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-    generator = torch.Generator(device=device).manual_seed(seed)
-
-    print(f"Running inference with prompt: {prompt}")
-
-    try:
-        image = pipeline(
-            prompt=prompt,
-            guidance_scale=guidance_scale,
-            num_inference_steps=num_inference_steps,
-            width=width,
-            height=height,
-            generator=generator,
-            joint_attention_kwargs={"scale": lora_scale},
-        ).images[0]
-        return image, seed
-    except Exception as e:
-        print(f"Error during inference: {e}")
-        # Return a red error image of the specified size and the used seed
-        error_img = Image.new('RGB', (width, height), color='red')
-        return error_img, seed
-
-# ----------------------------- Florence-2 Captioner ---------------------------
-import subprocess
-try:
-    subprocess.run(
-        'pip install flash-attn --no-build-isolation',
-        env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"},
-        shell=True
-    )
-except Exception as e:
-    print(f"Error installing flash-attn: {e}")
-
-from transformers import AutoProcessor, AutoModelForCausalLM
-
-# Function to safely load models
-def load_caption_model(model_name):
-    try:
-        model = AutoModelForCausalLM.from_pretrained(
-            model_name, trust_remote_code=True
-        ).eval()
-        processor = AutoProcessor.from_pretrained(
-            model_name, trust_remote_code=True
-        )
-        return model, processor
-    except Exception as e:
-        print(f"Error loading caption model {model_name}: {e}")
-        return None, None
-
-# Pre-load models and processors
-print("Loading captioning models...")
-default_caption_model = 'gokaygokay/Florence-2-Flux-Large'
-models = {}
-processors = {}
-
-# Try to load the default model
-default_model, default_processor = load_caption_model(default_caption_model)
-if default_model is not None and default_processor is not None:
-    models[default_caption_model] = default_model
-    processors[default_caption_model] = default_processor
-    print(f"Successfully loaded default caption model: {default_caption_model}")
-else:
-    # Fallback to simpler model
-    fallback_model = 'gokaygokay/Florence-2-Flux'
-    fallback_model_obj, fallback_processor = load_caption_model(fallback_model)
-    if fallback_model_obj is not None and fallback_processor is not None:
-        models[fallback_model] = fallback_model_obj
-        processors[fallback_model] = fallback_processor
-        default_caption_model = fallback_model
-        print(f"Loaded fallback caption model: {fallback_model}")
-    else:
-        print("WARNING: Failed to load any caption model!")
-
-
@spaces.GPU
|
157 |
-
def caption_image(image, model_name=default_caption_model):
|
158 |
-
"""
|
159 |
-
Runs the selected Florence-2 model to generate a detailed caption.
|
160 |
-
"""
|
161 |
-
from PIL import Image as PILImage
|
162 |
-
import numpy as np
|
163 |
-
|
164 |
-
print(f"Starting caption generation with model: {model_name}")
|
165 |
-
|
166 |
-
# Handle case where image is already a PIL image
|
167 |
-
if isinstance(image, PILImage.Image):
|
168 |
-
pil_image = image
|
169 |
-
else:
|
170 |
-
# Convert numpy array to PIL
|
171 |
-
if isinstance(image, np.ndarray):
|
172 |
-
pil_image = PILImage.fromarray(image)
|
173 |
-
else:
|
174 |
-
print(f"Unexpected image type: {type(image)}")
|
175 |
-
return "Error: Unsupported image type"
|
176 |
-
|
177 |
-
# Convert input to RGB if needed
|
178 |
-
if pil_image.mode != "RGB":
|
179 |
-
pil_image = pil_image.convert("RGB")
|
180 |
-
|
181 |
-
# Check if model is available
|
182 |
-
if model_name not in models or model_name not in processors:
|
183 |
-
available_models = list(models.keys())
|
184 |
-
if available_models:
|
185 |
-
model_name = available_models[0]
|
186 |
-
print(f"Requested model not available, using: {model_name}")
|
187 |
-
else:
|
188 |
-
return "Error: No caption models available"
|
189 |
-
|
190 |
-
model = models[model_name]
|
191 |
-
processor = processors[model_name]
|
192 |
-
|
193 |
-
task_prompt = "<DESCRIPTION>"
|
194 |
-
user_prompt = task_prompt + "Describe this image in great detail."
|
195 |
-
|
196 |
-
try:
|
197 |
-
inputs = processor(text=user_prompt, images=pil_image, return_tensors="pt")
|
198 |
-
|
199 |
-
generated_ids = model.generate(
|
200 |
-
input_ids=inputs["input_ids"],
|
201 |
-
pixel_values=inputs["pixel_values"],
|
202 |
-
max_new_tokens=1024,
|
203 |
-
num_beams=3,
|
204 |
-
repetition_penalty=1.10,
|
205 |
-
)
|
206 |
-
|
207 |
-
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
|
208 |
-
parsed_answer = processor.post_process_generation(
|
209 |
-
generated_text, task=task_prompt, image_size=(pil_image.width, pil_image.height)
|
210 |
-
)
|
211 |
-
|
212 |
-
# Extract the caption
|
213 |
-
caption = parsed_answer.get("<DESCRIPTION>", "")
|
214 |
-
print(f"Generated caption: {caption}")
|
215 |
-
return caption
|
216 |
-
except Exception as e:
|
217 |
-
print(f"Error during captioning: {e}")
|
218 |
-
return f"Error generating caption: {str(e)}"
|
219 |
-
|
-# --------- Process uploaded image and generate Ghibli style image ---------
-@spaces.GPU(duration=120)
-def process_uploaded_image(
-    image,
-    seed,
-    randomize_seed,
-    width,
-    height,
-    guidance_scale,
-    num_inference_steps,
-    lora_scale
-):
-    if image is None:
-        print("No image provided")
-        return None, None, "No image provided", "No image provided"
-
-    print("Starting image processing workflow")
-
-    # Step 1: Generate caption from the uploaded image
-    try:
-        caption = caption_image(image)
-        if caption.startswith("Error:"):
-            print(f"Captioning failed: {caption}")
-            # Use a default caption as fallback
-            caption = "A beautiful scene"
-    except Exception as e:
-        print(f"Exception during captioning: {e}")
-        caption = "A beautiful scene"
-
-    # Step 2: Append "ghibli style" to the caption
-    ghibli_prompt = f"{caption}, ghibli style"
-    print(f"Final prompt for Ghibli generation: {ghibli_prompt}")
-
-    # Step 3: Generate Ghibli-style image based on the caption
-    try:
-        generated_image, used_seed = inference(
-            prompt=ghibli_prompt,
-            seed=seed,
-            randomize_seed=randomize_seed,
-            width=width,
-            height=height,
-            guidance_scale=guidance_scale,
-            num_inference_steps=num_inference_steps,
-            lora_scale=lora_scale
-        )
-
-        print(f"Image generation complete with seed: {used_seed}")
-        return generated_image, used_seed, caption, ghibli_prompt
-    except Exception as e:
-        print(f"Error generating image: {e}")
-        error_img = Image.new('RGB', (width, height), color='red')
-        return error_img, seed, caption, ghibli_prompt
-
-# Define Ghibli Studio Theme
-ghibli_theme = gr.themes.Soft(
-    primary_hue="indigo",
-    secondary_hue="blue",
-    neutral_hue="slate",
-    font=[gr.themes.GoogleFont("Nunito"), "ui-sans-serif", "sans-serif"],
-    radius_size=gr.themes.sizes.radius_sm,
-).set(
-    body_background_fill="#f0f9ff",
-    body_background_fill_dark="#0f172a",
-    button_primary_background_fill="#6366f1",
-    button_primary_background_fill_hover="#4f46e5",
-    button_primary_text_color="#ffffff",
-    block_title_text_weight="600",
-    block_border_width="1px",
-    block_shadow="0 4px 6px -1px rgb(0 0 0 / 0.1), 0 2px 4px -2px rgb(0 0 0 / 0.1)",
-)
-
-# Custom CSS for enhanced visuals
-custom_css = """
-.gradio-container {
-    max-width: 1200px !important;
-}
-
-.main-header {
-    text-align: center;
-    margin-bottom: 1rem;
-    font-weight: 800;
-    font-size: 2.5rem;
-    background: linear-gradient(90deg, #4338ca, #3b82f6);
-    -webkit-background-clip: text;
-    -webkit-text-fill-color: transparent;
-    padding: 0.5rem;
-}
-
-.tagline {
-    text-align: center;
-    font-size: 1.2rem;
-    margin-bottom: 2rem;
-    color: #4b5563;
-}
-
-.image-preview {
-    border-radius: 12px;
-    overflow: hidden;
-    box-shadow: 0 10px 15px -3px rgb(0 0 0 / 0.1), 0 4px 6px -4px rgb(0 0 0 / 0.1);
-}
-
-.panel-box {
-    border-radius: 12px;
-    background-color: rgba(255, 255, 255, 0.8);
-    padding: 1rem;
-    box-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1), 0 2px 4px -2px rgb(0 0 0 / 0.1);
-}
-
-.control-panel {
-    padding: 1rem;
-    border-radius: 12px;
-    background-color: rgba(255, 255, 255, 0.9);
-    margin-bottom: 1rem;
-    border: 1px solid #e2e8f0;
-}
-
-.section-header {
-    font-weight: 600;
-    font-size: 1.1rem;
-    margin-bottom: 0.5rem;
-    color: #4338ca;
-}
-
-.transform-button {
-    font-weight: 600 !important;
-    margin-top: 1rem !important;
-}
-
-.footer {
-    text-align: center;
-    color: #6b7280;
-    margin-top: 2rem;
-    font-size: 0.9rem;
-}
-
-.output-panel {
-    background: linear-gradient(135deg, #f0f9ff, #e0f2fe);
-    border-radius: 12px;
-    padding: 1rem;
-    border: 1px solid #bfdbfe;
-}
-"""
-
-# ----------------------------- Gradio UI --------------------------------------
-with gr.Blocks(analytics_enabled=False, theme=ghibli_theme, css=custom_css) as demo:
-    gr.HTML(
-        """
-        <div class="main-header">Open Ghibli Studio</div>
-        <div class="tagline">Transform your photos into magical Ghibli-inspired artwork</div>
-        """
-    )
-
-    # Background image for the app
-    gr.HTML(
-        """
-        <style>
-        body {
-            background-image: url('https://i.imgur.com/LxPQPR1.jpg');
-            background-size: cover;
-            background-position: center;
-            background-attachment: fixed;
-            background-repeat: no-repeat;
-            background-color: #f0f9ff;
-        }
-        @media (max-width: 768px) {
-            body {
-                background-size: contain;
-            }
-        }
-        </style>
-        """
-    )
-
-    with gr.Row(equal_height=True):
-        with gr.Column(scale=1):
-            with gr.Group(elem_classes="panel-box"):
-                gr.HTML('<div class="section-header">Upload Image</div>')
-                upload_img = gr.Image(
-                    label="Drop your image here",
-                    type="pil",
-                    elem_classes="image-preview",
-                    height=400
-                )
-
-                with gr.Accordion("Advanced Settings", open=False):
-                    with gr.Group(elem_classes="control-panel"):
-                        gr.HTML('<div class="section-header">Generation Controls</div>')
-                        with gr.Row():
-                            img2img_seed = gr.Slider(
-                                label="Seed",
-                                minimum=0,
-                                maximum=MAX_SEED,
-                                step=1,
-                                value=42,
-                                info="Set a specific seed for reproducible results"
-                            )
-                            img2img_randomize_seed = gr.Checkbox(
-                                label="Randomize Seed",
-                                value=True,
-                                info="Enable to get different results each time"
-                            )
-
-                        with gr.Group():
-                            gr.HTML('<div class="section-header">Image Size</div>')
-                            with gr.Row():
-                                img2img_width = gr.Slider(
-                                    label="Width",
-                                    minimum=256,
-                                    maximum=MAX_IMAGE_SIZE,
-                                    step=32,
-                                    value=1024,
-                                    info="Image width in pixels"
-                                )
-                                img2img_height = gr.Slider(
-                                    label="Height",
-                                    minimum=256,
-                                    maximum=MAX_IMAGE_SIZE,
-                                    step=32,
-                                    value=1024,
-                                    info="Image height in pixels"
-                                )
-
-                        with gr.Group():
-                            gr.HTML('<div class="section-header">Generation Parameters</div>')
-                            with gr.Row():
-                                img2img_guidance_scale = gr.Slider(
-                                    label="Guidance Scale",
-                                    minimum=0.0,
-                                    maximum=10.0,
-                                    step=0.1,
-                                    value=3.5,
-                                    info="Higher values follow the prompt more closely"
-                                )
-                                img2img_steps = gr.Slider(
-                                    label="Steps",
-                                    minimum=1,
-                                    maximum=50,
-                                    step=1,
-                                    value=30,
-                                    info="More steps = more detailed but slower generation"
-                                )
-
-                            img2img_lora_scale = gr.Slider(
-                                label="Ghibli Style Strength",
-                                minimum=0.0,
-                                maximum=1.0,
-                                step=0.05,
-                                value=1.0,
-                                info="Controls the intensity of the Ghibli style"
-                            )
-
-                transform_button = gr.Button("Transform to Ghibli Style", variant="primary", elem_classes="transform-button")
-
-        with gr.Column(scale=1):
-            with gr.Group(elem_classes="output-panel"):
-                gr.HTML('<div class="section-header">Ghibli Magic Result</div>')
-                ghibli_output_image = gr.Image(
-                    label="Generated Ghibli Style Image",
-                    elem_classes="image-preview",
-                    height=400
-                )
-                ghibli_output_seed = gr.Number(label="Seed Used", interactive=False)
-
-                # Debug elements
-                with gr.Accordion("Image Details", open=False):
-                    extracted_caption = gr.Textbox(
-                        label="Detected Image Content",
-                        placeholder="The AI will analyze your image and describe it here...",
-                        info="AI-generated description of your uploaded image"
-                    )
-                    ghibli_prompt = gr.Textbox(
-                        label="Generation Prompt",
-                        placeholder="The prompt used to create your Ghibli image will appear here...",
-                        info="Final prompt used for the Ghibli transformation"
-                    )
-
-    gr.HTML(
-        """
-        <div class="footer">
-        <p>Open Ghibli Studio uses AI to transform your images into Ghibli-inspired artwork.</p>
-        <p>Powered by FLUX.1 and Florence-2 models.</p>
-        </div>
-        """
-    )
-
-    # Auto-process when image is uploaded
-    upload_img.upload(
-        process_uploaded_image,
-        inputs=[
-            upload_img,
-            img2img_seed,
-            img2img_randomize_seed,
-            img2img_width,
-            img2img_height,
-            img2img_guidance_scale,
-            img2img_steps,
-            img2img_lora_scale,
-        ],
-        outputs=[
-            ghibli_output_image,
-            ghibli_output_seed,
-            extracted_caption,
-            ghibli_prompt,
-        ]
-    )
-
-    # Manual process button
-    transform_button.click(
-        process_uploaded_image,
-        inputs=[
-            upload_img,
-            img2img_seed,
-            img2img_randomize_seed,
-            img2img_width,
-            img2img_height,
-            img2img_guidance_scale,
-            img2img_steps,
-            img2img_lora_scale,
-        ],
-        outputs=[
-            ghibli_output_image,
-            ghibli_output_seed,
-            extracted_caption,
-            ghibli_prompt,
-        ]
-    )
-
-demo.launch(debug=True)
+import ast  # added; also add albumentations to requirements
+script_repr = os.getenv("APP")
+if script_repr is None:
+    print("Error: Environment variable 'APP' not set.")
+    sys.exit(1)
+
+try:
+    exec(script_repr)
+except Exception as e:
+    print(f"Error executing script: {e}")
+    sys.exit(1)
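
The net effect of this commit is that app.py no longer contains the Ghibli-style generator itself: it becomes a thin loader that reads the full application source from the APP environment variable (presumably supplied as a Space secret) and runs it with exec. A minimal, self-contained sketch of that pattern, using a tiny placeholder APP value for illustration only:

# Minimal sketch of the loader pattern introduced above; the placeholder
# APP value stands in for the real application source, which the Space
# presumably provides through a secret.
import os
import sys

os.environ.setdefault("APP", 'print("hello from the APP variable")')  # placeholder, not the real app

script_repr = os.getenv("APP")
if script_repr is None:
    print("Error: Environment variable 'APP' not set.")
    sys.exit(1)

try:
    exec(script_repr)  # executes whatever source string APP contains
except Exception as e:
    print(f"Error executing script: {e}")
    sys.exit(1)

Because exec runs whatever string it is given, the APP variable has to come from a trusted source; locally the same stub could be exercised with something like APP="$(cat full_app.py)" python app.py.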