from diffusers import DiffusionPipeline
import torch

# Named prompt style presets (extra keywords for different looks).
prompt_presets = {
    "Cinematic": "cinematic lighting, epic composition, 8k",
    "Realistic Portrait": "realistic face, shallow depth of field, photography",
    "Anime Style": "anime, cel-shading, crisp lines, colorful",
    "Fantasy": "mythical, magical light, detailed, fantasy world",
    "None": "",
}


def load_model():
    """Load the SDXL base pipeline, using fp16 weights when a GPU is available."""
    print("Loading SDXL model...")
    pipe = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        variant="fp16" if torch.cuda.is_available() else None,
    )
    pipe.to("cuda" if torch.cuda.is_available() else "cpu")
    return pipe


def generate_image(pipe, prompt: str, guidance: float, steps: int, width: int, height: int):
    """Run the pipeline on a prompt and return the first generated PIL image."""
    if not prompt or len(prompt.strip()) < 5:
        raise ValueError("Prompt too short. Please describe your idea better.")
    result = pipe(
        prompt,
        guidance_scale=guidance,
        num_inference_steps=steps,
        height=height,
        width=width,
    )
    return result.images[0]
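

# --- Example usage (a minimal sketch, not part of the original snippet) ---
# The code above defines the presets and the two helpers but does not show how
# they are wired together; the driver below is an assumption that loads the
# pipeline once, appends a preset to the prompt, and saves a single image.
if __name__ == "__main__":
    pipe = load_model()
    styled_prompt = "a lighthouse at dusk, " + prompt_presets["Cinematic"]
    image = generate_image(pipe, styled_prompt, guidance=7.5, steps=30, width=1024, height=1024)
    image.save("output.png")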