Spaces: Runtime error

lionelgarnier committed
Commit 16aaa49 · 1 Parent(s): 640d399

try fix flux based on example

- app.py +15 -17
- requirements.txt +5 -1
app.py CHANGED

@@ -28,11 +28,12 @@ def get_image_gen_pipeline():
         dtype = torch.bfloat16
         _image_gen_pipeline = DiffusionPipeline.from_pretrained(
             "black-forest-labs/FLUX.1-schnell",
-            # "black-forest-labs/FLUX.1-dev",
             torch_dtype=dtype,
         ).to(device)
-        _image_gen_pipeline.enable_model_cpu_offload()
-        _image_gen_pipeline.enable_vae_slicing()
+
+        # Comment these out for now to match the working example
+        # _image_gen_pipeline.enable_model_cpu_offload()
+        # _image_gen_pipeline.enable_vae_slicing()
     except Exception as e:
         print(f"Error loading image generation model: {e}")
         return None
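For reference, this hunk converges on the stock diffusers loading recipe for FLUX.1-schnell. A minimal standalone sketch of that path; the CUDA-with-CPU-fallback device handling is an assumption, since app.py's own `device` variable is defined outside this diff:

    import torch
    from diffusers import DiffusionPipeline

    # Assumed device selection; app.py defines `device` elsewhere.
    device = "cuda" if torch.cuda.is_available() else "cpu"

    pipe = DiffusionPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-schnell",
        torch_dtype=torch.bfloat16,  # bfloat16 roughly halves memory vs float32
    ).to(device)

The two disabled calls are optional memory optimizations, not correctness requirements: enable_model_cpu_offload() keeps submodules on the CPU until they are needed, and enable_vae_slicing() decodes in slices to lower peak memory, both at some speed cost.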
@@ -94,9 +95,8 @@ def validate_dimensions(width, height):
     return True, None
 
 @spaces.GPU()
-def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4):
+def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, progress=gr.Progress(track_tqdm=True)):
     try:
-
         # Validate that prompt is not empty
         if not prompt or prompt.strip() == "":
             return None, "Please provide a valid prompt."
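A note on the new parameter: in Gradio, giving an event handler a default argument of gr.Progress(track_tqdm=True) makes the UI show a progress bar and mirror any tqdm loop running inside the call, which includes diffusers' denoising loop. A tiny self-contained sketch of the pattern (the toy function and names are illustrative, not from app.py):

    import time
    import gradio as gr
    from tqdm import tqdm

    def slow_task(n, progress=gr.Progress(track_tqdm=True)):
        # Any tqdm loop inside the handler is mirrored in the Gradio UI.
        for _ in tqdm(range(int(n)), desc="working"):
            time.sleep(0.05)
        return f"done after {int(n)} steps"

    demo = gr.Interface(slow_task, gr.Number(value=20), "text")
    # demo.launch()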
@@ -112,22 +112,20 @@ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_in
         if randomize_seed:
             seed = random.randint(0, MAX_SEED)
 
-
+        # Use default torch generator instead of cuda-specific generator
+        generator = torch.Generator().manual_seed(seed)
 
-
+        # Match the working example's parameters
         output = pipe(
-
-
-
-
-
-
-
-        )
+            prompt=prompt,
+            width=width,
+            height=height,
+            num_inference_steps=num_inference_steps,
+            generator=generator,
+            guidance_scale=0.0, # Changed from 7.5 to 0.0
+        )
 
         image = output.images[0]
-
-        #torch.cuda.empty_cache()
         return image, f"Image generated successfully with seed {seed}"
     except Exception as e:
         print(f"Error in infer: {str(e)}")
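Two substantive changes here. First, torch.Generator() with no device argument lives on the CPU, so the seeded generator works whether or not CUDA is available and gives reproducible results across devices. Second, FLUX.1-schnell is a timestep-distilled, few-step model: guidance_scale=0.0 with about 4 steps is its intended operating point, while 7.5 is a classifier-free-guidance value suited to models like Stable Diffusion. A hedged end-to-end sketch of the resulting call (the prompt and filename are placeholders):

    import torch
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
    ).to("cuda")

    # CPU generator: device-agnostic and reproducible for a given seed.
    generator = torch.Generator().manual_seed(42)

    image = pipe(
        prompt="a tiny astronaut hatching from an egg on the moon",
        width=1024,
        height=1024,
        num_inference_steps=4,  # schnell is distilled for very few steps
        generator=generator,
        guidance_scale=0.0,     # the distilled model expects no CFG
    ).images[0]
    image.save("flux-schnell.png")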
requirements.txt CHANGED

@@ -30,4 +30,8 @@ accelerate
 git+https://github.com/huggingface/diffusers.git
 invisible_watermark
 sentencepiece
-protobuf
+protobuf
+
+torch
+transformers==4.42.4
+xformers
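These pins back the app.py changes: torch and xformers become explicit dependencies, and transformers is pinned to 4.42.4, presumably to track the example the commit message references. A quick post-install sanity check (a sketch; the expected version comes from the pin above):

    import torch
    import transformers

    print(transformers.__version__)  # expected: 4.42.4
    print(torch.__version__, torch.cuda.is_available())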
|