import gradio as gr
import numpy as np
import random
import os
import spaces
import torch
from diffusers import DiffusionPipeline
from transformers import pipeline, AutoTokenizer
from huggingface_hub import login
from PIL import Image
hf_token = os.getenv("hf_token")
login(token=hf_token)
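# Note (assumption based on the login() call above): mistralai/Mistral-7B-Instruct-v0.3
# is a gated model, so the Space needs an `hf_token` secret whose account has
# accepted the model's license terms.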
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048
PRELOAD_MODELS = False # Easy switch for preloading
_text_gen_pipeline = None
_image_gen_pipeline = None
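# Both pipelines are cached in module-level globals so each model is loaded at
# most once per process; the get_*_pipeline() helpers below initialize them
# lazily on first use.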
@spaces.GPU()
def get_image_gen_pipeline():
    global _image_gen_pipeline
    if _image_gen_pipeline is None:
        try:
            device = "cuda" if torch.cuda.is_available() else "cpu"
            dtype = torch.bfloat16
            _image_gen_pipeline = DiffusionPipeline.from_pretrained(
                "black-forest-labs/FLUX.1-schnell",
                torch_dtype=dtype,
            ).to(device)
            # Commented out for now to match the working example
            # _image_gen_pipeline.enable_model_cpu_offload()
            # _image_gen_pipeline.enable_vae_slicing()
        except Exception as e:
            print(f"Error loading image generation model: {e}")
            return None
    return _image_gen_pipeline
@spaces.GPU()
def get_text_gen_pipeline():
    global _text_gen_pipeline
    if _text_gen_pipeline is None:
        try:
            device = "cuda" if torch.cuda.is_available() else "cpu"
            tokenizer = AutoTokenizer.from_pretrained(
                "mistralai/Mistral-7B-Instruct-v0.3",
                use_fast=True,
            )
            tokenizer.pad_token = tokenizer.pad_token or tokenizer.eos_token
            _text_gen_pipeline = pipeline(
                "text-generation",
                model="mistralai/Mistral-7B-Instruct-v0.3",
                tokenizer=tokenizer,
                max_new_tokens=2048,
                device=device,
                pad_token_id=tokenizer.pad_token_id,
            )
        except Exception as e:
            print(f"Error loading text generation model: {e}")
            return None
    return _text_gen_pipeline
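# When given a list of chat messages, the text-generation pipeline returns the
# full conversation, roughly shaped like this (illustrative, not verbatim output):
# [{"generated_text": [{"role": "system", "content": ...},
#                      {"role": "user", "content": ...},
#                      {"role": "assistant", "content": "<refined prompt>"}]}]
# refine_prompt() below extracts the last assistant message from this structure.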
@spaces.GPU()
def refine_prompt(prompt, progress=gr.Progress(track_tqdm=True)):
    text_gen = get_text_gen_pipeline()
    if text_gen is None:
        return "", "Text generation model is unavailable."
    try:
        messages = [
            {
                "role": "system",
                "content": (
                    "You are a product designer with strong expertise in text-to-image "
                    "generation. You will receive a product request as a short description, "
                    "and your mission is to imagine a new product design that meets that need.\n\n"
                    "The deliverable (generated response) must be exclusively a prompt for the "
                    "FLUX.1-schnell text-to-image model.\n\n"
                    "The prompt must include a visual description of the object that explicitly "
                    "mentions the essential aspects of its function.\n"
                    "You must also explicitly state the aesthetic/photographic characteristics "
                    "of the render (e.g. photorealistic, high quality, focal length, grain, etc.), "
                    "since the image will be the object's main image in the product catalog. "
                    "The background of the generated image must be entirely white.\n"
                    "The prompt must contain no narration; it can be long but must not exceed "
                    "77 tokens."
                ),
            },
            {"role": "user", "content": prompt},
        ]
        with progress.tqdm(total=1, desc="Generating text") as pbar:
            refined_prompt = text_gen(messages)
            pbar.update(1)
        # Extract just the assistant's content from the response
        try:
            messages = refined_prompt[0]["generated_text"]
            # Find the last message with role 'assistant'
            assistant_messages = [msg for msg in messages if msg["role"] == "assistant"]
            if not assistant_messages:
                return "", "Error: No assistant response found"
            assistant_content = assistant_messages[-1]["content"]
            return assistant_content, "Prompt refined successfully!"
        except (KeyError, IndexError):
            return "", "Error: Unexpected response format from the model"
    except Exception as e:
        return "", f"Error refining prompt: {str(e)}"
def validate_dimensions(width, height):
    if width * height > MAX_IMAGE_SIZE * MAX_IMAGE_SIZE:
        return False, "Image dimensions too large"
    return True, None
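# Example: with MAX_IMAGE_SIZE = 2048, validate_dimensions(2048, 2048) returns
# (True, None), while validate_dimensions(4096, 4096) returns
# (False, "Image dimensions too large") because 4096 * 4096 > 2048 * 2048.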
@spaces.GPU()
def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, progress=gr.Progress(track_tqdm=True)):
    try:
        # Validate that prompt is not empty
        if not prompt or prompt.strip() == "":
            return None, "Please provide a valid prompt."
        pipe = get_image_gen_pipeline()
        if pipe is None:
            return None, "Image generation model is unavailable."
        is_valid, error_msg = validate_dimensions(width, height)
        if not is_valid:
            return None, error_msg
        if randomize_seed:
            seed = random.randint(0, MAX_SEED)
        # Use the default (CPU) torch generator so seeding works on both CPU and GPU
        generator = torch.Generator().manual_seed(seed)
        output = pipe(
            prompt=prompt,
            width=width,
            height=height,
            num_inference_steps=num_inference_steps,
            generator=generator,
            guidance_scale=0.0,  # FLUX.1-schnell is guidance-distilled and expects 0.0
        )
        image = output.images[0]
        return image, f"Image generated successfully with seed {seed}"
    except Exception as e:
        print(f"Error in infer: {str(e)}")
        return None, f"Error generating image: {str(e)}"
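# Quick sanity check (hypothetical local usage, outside the Gradio UI):
# image, status = infer("a red ceramic mug on a white background", seed=0)
# if image is not None:
#     image.save("preview.png")  # infer returns a PIL Image on success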
examples = [
    "a backpack for kids, flower style",
    "medieval flip flops",
    "cat shaped cake mold",
]

css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""
def preload_models():
    global _text_gen_pipeline, _image_gen_pipeline
    print("Preloading models...")
    success = True
    try:
        _text_gen_pipeline = get_text_gen_pipeline()
        if _text_gen_pipeline is None:
            success = False
    except Exception as e:
        print(f"Error preloading text generation model: {str(e)}")
        success = False
    try:
        _image_gen_pipeline = get_image_gen_pipeline()
        if _image_gen_pipeline is None:
            success = False
    except Exception as e:
        print(f"Error preloading image generation model: {str(e)}")
        success = False
    status = "Models preloaded successfully!" if success else "Error preloading models"
    print(status)
    return success
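# preload_models() only runs when PRELOAD_MODELS is True (see create_interface
# below); with the default setting, both models load lazily on first request.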
def create_interface():
    # Preload models if needed
    if PRELOAD_MODELS:
        models_loaded = preload_models()
        model_status = "✅ Models loaded successfully!" if models_loaded else "⚠️ Error loading models"
    else:
        model_status = "ℹ️ Models will be loaded on demand"

    with gr.Blocks(css=css) as demo:
        gr.Info(model_status)
        with gr.Column(elem_id="col-container"):
            gr.Markdown("# Text to Product\nUsing Mistral-7B-Instruct-v0.3 + FLUX.1-schnell + Trellis")
            # Basic inputs
            with gr.Row():
                prompt = gr.Text(
                    show_label=False,
                    max_lines=1,
                    placeholder="Enter basic object prompt",
                    container=False,
                )
                prompt_button = gr.Button("Refine prompt with Mistral")
            refined_prompt = gr.Text(
                show_label=False,
                max_lines=10,
                placeholder="Detailed object prompt",
                container=False,
                max_length=2048,
            )
            visual_button = gr.Button("Create visual with Flux")
            generated_image = gr.Image(show_label=False)
            error_box = gr.Textbox(
                label="Status Messages",
                interactive=False,
                placeholder="Status messages will appear here",
            )
            # Accordion sections for advanced settings
            with gr.Accordion("Advanced Settings", open=False):
                with gr.Tab("Mistral"):
                    # Mistral settings
                    # Note: this slider is not currently passed to refine_prompt,
                    # so changing it has no effect until it is added to the event
                    # handler inputs below.
                    temperature = gr.Slider(
                        label="Temperature",
                        value=0.9,
                        minimum=0.0,
                        maximum=1.0,
                        step=0.05,
                        info="Higher values produce more diverse outputs",
                    )
                with gr.Tab("Flux"):
                    # Flux settings
                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                    with gr.Row():
                        width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=512)
                        height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=512)
                    num_inference_steps = gr.Slider(
                        label="Number of inference steps",
                        minimum=1,
                        maximum=50,
                        step=1,
                        value=10,
                    )
            # Examples section: refine_prompt returns (text, status), so both
            # output components must be listed for example caching to work
            gr.Examples(
                examples=examples,
                fn=refine_prompt,
                inputs=[prompt],
                outputs=[refined_prompt, error_box],
                cache_examples=True,
            )
        # Event handlers
        gr.on(
            triggers=[prompt_button.click, prompt.submit],
            fn=refine_prompt,
            inputs=[prompt],
            outputs=[refined_prompt, error_box],
        )
        gr.on(
            triggers=[visual_button.click],
            fn=infer,
            inputs=[refined_prompt, seed, randomize_seed, width, height, num_inference_steps],
            outputs=[generated_image, error_box],
        )
    return demo
if __name__ == "__main__":
    demo = create_interface()
    demo.launch()
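# To serve the app outside of Spaces, launch() accepts standard options, e.g.:
# demo.launch(server_name="0.0.0.0", server_port=7860)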