lionelgarnier committed · ac3fb1c · 1 parent: 234c60d

remove progress

app.py CHANGED
@@ -93,20 +93,20 @@ def validate_dimensions(width, height):
     return True, None
 
 @spaces.GPU()
-def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, progress=gr.Progress(track_tqdm=True)):
+def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4):  # , progress=gr.Progress(track_tqdm=True)
     try:
-        progress(0, desc="Starting generation...")
+        # progress(0, desc="Starting generation...")
 
         # Validate that prompt is not empty
         if not prompt or prompt.strip() == "":
             return None, "Please provide a valid prompt."
 
-        progress(0.1, desc="Loading image generation model...")
+        # progress(0.1, desc="Loading image generation model...")
         pipe = get_image_gen_pipeline()
         if pipe is None:
             return None, "Image generation model is unavailable."
 
-        progress(0.2, desc="Validating dimensions...")
+        # progress(0.2, desc="Validating dimensions...")
         is_valid, error_msg = validate_dimensions(width, height)
         if not is_valid:
             return None, error_msg
@@ -114,10 +114,10 @@ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_in
         if randomize_seed:
             seed = random.randint(0, MAX_SEED)
 
-        progress(0.3, desc="Setting up generator...")
+        # progress(0.3, desc="Setting up generator...")
         generator = torch.Generator("cuda").manual_seed(seed)  # Explicitly use CUDA generator
 
-        progress(0.4, desc="Generating image...")
+        # progress(0.4, desc="Generating image...")
         with torch.autocast('cuda'):
             image = pipe(
                 prompt=prompt,
@@ -125,12 +125,12 @@ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_in
                 height=height,
                 num_inference_steps=num_inference_steps,
                 generator=generator,
-                guidance_scale=
-                max_sequence_length=512
+                guidance_scale=0.0,  # Increased guidance scale
+                # max_sequence_length=512
             ).images[0]
 
         torch.cuda.empty_cache()  # Clean up GPU memory after generation
-        progress(1.0, desc="Done!")
+        # progress(1.0, desc="Done!")
         return image, seed
     except Exception as e:
         print(f"Error in infer: {str(e)}")  # Add detailed error logging
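For context on what this commit strips out: `gr.Progress(track_tqdm=True)` is Gradio's hook for surfacing progress in the web UI. A minimal sketch of the pattern being removed (generic Gradio usage, not code from this repo beyond what the diff shows):

```python
import gradio as gr

def task(prompt, progress=gr.Progress(track_tqdm=True)):
    # When the function is invoked from a Gradio event, Gradio injects a
    # Progress instance here; track_tqdm=True additionally mirrors any tqdm
    # bars (such as a diffusers denoising loop) into the UI progress bar.
    progress(0, desc="Starting...")
    # ... do work, reporting fractions between 0 and 1 ...
    progress(1.0, desc="Done!")
    return prompt
```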
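And a minimal sketch of how the revised `infer` might now be wired into the app's UI. The component names and layout below are assumptions for illustration; only `infer`'s new signature comes from the diff. With the `progress` parameter gone, the click handler passes exactly the six remaining inputs and Gradio injects nothing extra:

```python
import gradio as gr

# Hypothetical wiring; `infer` is the function defined above in app.py.
with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    seed = gr.Number(value=42, label="Seed", precision=0)
    randomize_seed = gr.Checkbox(value=False, label="Randomize seed")
    width = gr.Slider(256, 2048, value=1024, step=64, label="Width")
    height = gr.Slider(256, 2048, value=1024, step=64, label="Height")
    steps = gr.Slider(1, 50, value=4, step=1, label="Inference steps")
    result = gr.Image(label="Generated image")
    used_seed = gr.Number(label="Seed used")
    generate = gr.Button("Generate")

    # infer returns (image, seed) on success and (None, error_message) on
    # failure, so both outputs are listed.
    generate.click(
        infer,
        inputs=[prompt, seed, randomize_seed, width, height, steps],
        outputs=[result, used_seed],
    )

demo.launch()
```

Separately, `guidance_scale=0.0` typically disables classifier-free guidance in diffusers-style pipelines, the usual setting for few-step distilled models and consistent with `num_inference_steps=4` here; the inline comment "Increased guidance scale" appears to be left over from an earlier edit.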