Spaces: Running on Zero
Update app.py

app.py CHANGED
@@ -81,10 +81,14 @@ def initialize_model():
     lora_path = hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors")
     pipe.load_lora_weights(lora_path)
     pipe.fuse_lora(lora_scale=0.125)
+
+    # Note: set the device explicitly here (applies to all components)
     pipe.to(device="cuda", dtype=torch.bfloat16)
 
-    # Add safety checker
+    # Add the safety checker and move it to the correct device
     pipe.safety_checker = safety_checker.StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker")
+    if hasattr(pipe, 'safety_checker') and pipe.safety_checker is not None:
+        pipe.safety_checker.to("cuda")
 
     print("Model loading complete")
     return True
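The hunk above relies on pipe.to(...) propagating the CUDA device to every sub-module, then moves the freshly attached safety checker over separately. A minimal sketch, not part of the commit, of how one might verify that assumption, assuming pipe is a diffusers pipeline whose components property maps names to its parts:

    import torch

    def report_devices(pipe):
        # Print where each pipeline component's weights actually live.
        for name, module in pipe.components.items():
            if isinstance(module, torch.nn.Module):
                param = next(module.parameters(), None)
                if param is not None:
                    print(f"{name}: {param.device}")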
@@ -270,14 +274,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
     def process_image(height, width, steps, scales, prompt, seed):
         global pipe
 
-        # Check whether the model has been initialized
-        if pipe is None:
-            return None, "Loading the model... the first run may take a while.", True, "", False
-
-        model_loaded = initialize_model()
-        if not model_loaded:
-            return None, "", False, "An error occurred while loading the model. Please refresh the page and try again.", True
-
         # Validate the input
         if not prompt or prompt.strip() == "":
            return None, "", False, "Please enter an image description.", True
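This hunk removes the lazy model-initialization path from process_image; a later hunk re-adds it in prepare_generation, so loading happens once, before generation starts. If concurrent requests are a concern, a lock-guarded variant is one option; a minimal sketch, where the threading.Lock is my assumption and not anything in the app:

    import threading

    _init_lock = threading.Lock()

    def ensure_model_loaded():
        global pipe
        if pipe is None:
            with _init_lock:
                if pipe is None:  # re-check after acquiring the lock
                    initialize_model()
        return pipe is not None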
@@ -298,21 +294,25 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
         else:
             seed = int(seed)  # handle the type conversion safely
 
-        #
-
-
+        # Adjust height and width to multiples of 64 (a FLUX model requirement)
+        height = (int(height) // 64) * 64
+        width = (int(width) // 64) * 64
+
+        # Safety limits on the maximum values
+        steps = min(int(steps), 25)
+        scales = max(min(float(scales), 5.0), 0.0)
+
         # Generate the image
         with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16), timer("inference"):
+            # Important: set the device explicitly when creating the generator
             generator = torch.Generator(device="cuda").manual_seed(seed)
 
-            #
-
-
-
-
-
-            scales = max(min(float(scales), 5.0), 0.0)
-
+            # Make sure all tensors are on the same device
+            for name, module in pipe.components.items():
+                if hasattr(module, 'device') and module.device.type != "cuda":
+                    module.to("cuda")
+
+            # Generate the image - pass the device explicitly everywhere
             generated_image = pipe(
                 prompt=[filtered_prompt],
                 generator=generator,
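The new height/width adjustment floors each dimension to a multiple of 64, so a requested 1000 becomes 960 rather than the nearest multiple, 1024. A standalone sketch of that behavior next to a nearest-multiple alternative (round64 is hypothetical, not what the app uses):

    def floor64(x) -> int:
        # What the commit does: floor to a multiple of 64 (can only shrink).
        return (int(x) // 64) * 64

    def round64(x) -> int:
        # Alternative: snap to the nearest multiple of 64, never below 64.
        return max(64, round(int(x) / 64) * 64)

    assert floor64(1000) == 960
    assert round64(1000) == 1024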
@@ -320,10 +320,10 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
                 guidance_scale=scales,
                 height=height,
                 width=width,
-                max_sequence_length=256
+                max_sequence_length=256,
+                device="cuda"  # pass the device explicitly
             ).images[0]
 
-            # On success, return the image and hide the status message
             return generated_image, "", False, "", False
 
     except Exception as e:
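For reference, a minimal standalone version of the pipeline call this diff converges on. Treat it as a sketch: num_inference_steps=steps is my assumption (that line is unchanged and outside the hunks), and it omits the device="cuda" keyword added above, since diffusers pipelines take the device from the pipeline and the generator rather than documenting a device call argument:

    import torch

    generator = torch.Generator(device="cuda").manual_seed(seed)
    generated_image = pipe(
        prompt=[filtered_prompt],
        generator=generator,
        num_inference_steps=steps,  # assumed from context
        guidance_scale=scales,
        height=height,
        width=width,
        max_sequence_length=256,
    ).images[0]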
@@ -342,11 +342,19 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
 
     # Helper that prepares image generation
     def prepare_generation(height, width, steps, scales, prompt, seed):
+        global pipe
+
         # Load the model if it has not been loaded yet
         if pipe is None:
+            # Show the loading status
+            loading_message = "Loading the model... the first run may take a while."
+
             is_loaded = initialize_model()
             if not is_loaded:
-                return None, "Model loading failed. Please refresh the page and try again.", True
+                return None, "", False, "Model loading failed. Please refresh the page and try again.", True
+
+        # Show the generation status
+        loading_message = "Generating the image..."
 
         # Start the generation process
         return process_image(height, width, steps, scales, prompt, seed)
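Note that loading_message is assigned in this hunk but never returned or displayed. If the intent is to surface interim status in the Gradio UI, one option is to make the handler a generator and yield updates; a minimal sketch, with the output ordering assumed to match the app's (image, status text, status visible, error text, error visible) tuple:

    def prepare_generation(height, width, steps, scales, prompt, seed):
        global pipe
        if pipe is None:
            # First yield shows the loading status while the model initializes.
            yield None, "Loading the model... the first run may take a while.", True, "", False
            if not initialize_model():
                yield None, "", False, "Model loading failed. Please refresh the page and try again.", True
                return
        yield process_image(height, width, steps, scales, prompt, seed)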
@@ -364,5 +372,5 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
     )
 
 if __name__ == "__main__":
-
+
     demo.queue(max_size=10).launch()