Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -3,17 +3,16 @@ import os
 import random
 import torch
 import numpy as np
-from diffusers import DiffusionPipeline,
+from diffusers import DiffusionPipeline, AutoencoderKL
 from PIL import Image
 import re

-def generate_image(pipe,
+def generate_image(pipe, prompt, seed=42, randomize_seed=True, width=768, height=768, guidance_scale=4.5, num_inference_steps=20):
     """
     Generate an image with the FLUX.1-Krea-dev model.

     Args:
-        pipe: Diffusers pipeline.
-        good_vae: high-quality VAE decoder.
+        pipe: the configured Diffusers pipeline.
         prompt (str): text prompt.
         seed (int): random seed.
         randomize_seed (bool): whether to randomize the seed.
@@ -32,30 +31,18 @@ def generate_image(pipe, good_vae, prompt, seed=42, randomize_seed=True, width=7
     generator = torch.Generator(device=pipe.device).manual_seed(seed)

     print(f"ℹ️ Using seed: {seed}")
-    print("
+    print("🚀 Starting image generation...")

-    #
-
+    # Call the pipeline directly for a PIL image; it handles decoding internally
+    image = pipe(
         prompt=prompt,
         guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
         width=width,
         height=height,
         generator=generator,
-        output_type="
-    ).images
-
-    print("2. Decoding the image with the high-quality VAE...")
-
-    # Decode the latents with the high-quality VAE
-    # They must be scaled according to the VAE config
-    latents = latents / good_vae.config.scaling_factor
-    image_tensor = good_vae.decode(latents, return_dict=False)[0]
-
-    print("3. Post-processing the image...")
-
-    # Convert the tensor to a PIL image
-    image = pipe.image_processor.postprocess(image_tensor, output_type="pil")[0]
+        output_type="pil"
+    ).images[0]

     return image, seed

@@ -70,6 +57,7 @@ def main():
     parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
     parser.add_argument("--width", type=int, default=768, help="Image width.")
     parser.add_argument("--height", type=int, default=768, help="Image height.")
+    parser.add_argument("--guidance", type=float, default=4.5, help="Guidance scale.")
     args = parser.parse_args()

     # --- Model loading ---
@@ -77,12 +65,11 @@ def main():
     dtype = torch.bfloat16
     device = "cuda" if torch.cuda.is_available() else "cpu"

-    #
-
-    good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-Krea-dev", subfolder="vae", torch_dtype=dtype).to(device)
+    # Load the high-quality VAE decoder
+    good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-Krea-dev", subfolder="vae", torch_dtype=dtype)

-    # Load the main pipeline
-    pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-Krea-dev", torch_dtype=dtype, vae=
+    # Load the main pipeline, passing the high-quality VAE in directly
+    pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-Krea-dev", torch_dtype=dtype, vae=good_vae).to(device)

     if device == "cuda":
         torch.cuda.empty_cache()
@@ -90,30 +77,32 @@ def main():
     print(f"✅ Model loaded, using device: {device}")

     # --- Image generation ---
-    print(f"
+    print(f"🎨 Generating image for prompt: '{args.prompt}'")

     randomize = args.seed is None
-
+    # If the user did not give a seed, let the call randomize it; otherwise use the given seed
+    seed_value = args.seed if not randomize else 42

     generated_image, used_seed = generate_image(
         pipe=pipe,
-        good_vae=good_vae,
         prompt=args.prompt,
         seed=seed_value,
         randomize_seed=randomize,
         width=args.width,
         height=args.height,
-        num_inference_steps=args.steps
+        num_inference_steps=args.steps,
+        guidance_scale=args.guidance
     )

     # --- Save the image ---
     output_dir = "output"
     os.makedirs(output_dir, exist_ok=True)

-    #
+    # Sanitize the prompt so it can be used as a safe filename
     safe_prompt = re.sub(r'[^\w\s-]', '', args.prompt).strip()
     safe_prompt = re.sub(r'[-\s]+', '_', safe_prompt)

+    # Keep the filename from becoming too long
     filename = f"{safe_prompt[:50]}_{used_seed}.png"
     filepath = os.path.join(output_dir, filename)

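Taken together, the update removes the manual latent-decoding path (scaling the latents by good_vae.config.scaling_factor, calling good_vae.decode, then pipe.image_processor.postprocess) and instead hands the high-quality VAE to DiffusionPipeline.from_pretrained, so the pipeline decodes straight to a PIL image. Below is a minimal sketch of the updated path assembled from the added lines; the prompt string, seed, and output path are illustrative values, not taken from the script:

import torch
from diffusers import DiffusionPipeline, AutoencoderKL

dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the high-quality VAE once and pass it into the pipeline
good_vae = AutoencoderKL.from_pretrained(
    "black-forest-labs/FLUX.1-Krea-dev", subfolder="vae", torch_dtype=dtype
)
pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Krea-dev", torch_dtype=dtype, vae=good_vae
).to(device)

# Illustrative prompt and settings; the real script takes these from argparse
generator = torch.Generator(device=pipe.device).manual_seed(42)
image = pipe(
    prompt="a lighthouse at dusk, watercolor",
    guidance_scale=4.5,
    num_inference_steps=20,
    width=768,
    height=768,
    generator=generator,
    output_type="pil",
).images[0]
image.save("example.png")  # hypothetical output path

Passing the VAE in at load time is also why generate_image no longer needs the separate good_vae argument: both the function signature and the call in main() shrink accordingly.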
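On the CLI side, randomize is True only when --seed is omitted, and in that case a placeholder value of 42 is passed alongside randomize_seed=True. The part of generate_image that consumes randomize_seed (roughly lines 19 to 30 of the new file) is not shown in this diff; the sketch below is a hypothetical illustration of how such a flag is commonly honored, not the file's actual code, and resolve_seed and MAX_SEED are invented names:

import random

MAX_SEED = 2**32 - 1  # hypothetical bound; the real script may use a different maximum

def resolve_seed(seed: int, randomize_seed: bool) -> int:
    # Draw a fresh seed when randomization was requested,
    # otherwise keep the explicitly supplied one.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed

print(resolve_seed(42, True))    # some random value
print(resolve_seed(123, False))  # 123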
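The filename cleanup at the end of main() is unchanged apart from the new comments: the first re.sub strips everything except word characters, whitespace, and hyphens, the second collapses runs of whitespace or hyphens into underscores, and the result is capped at 50 characters before the seed is appended. A quick worked example with a made-up prompt:

import re

prompt = "A cat, sitting on a mat!"  # hypothetical prompt
used_seed = 42

safe_prompt = re.sub(r'[^\w\s-]', '', prompt).strip()  # "A cat sitting on a mat"
safe_prompt = re.sub(r'[-\s]+', '_', safe_prompt)       # "A_cat_sitting_on_a_mat"
filename = f"{safe_prompt[:50]}_{used_seed}.png"        # "A_cat_sitting_on_a_mat_42.png"
print(filename)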