import gradio as gr
import numpy as np
import random
import os
import spaces
import torch
from diffusers import DiffusionPipeline
from transformers import pipeline, AutoTokenizer
from huggingface_hub import login
from PIL import Image

# Authenticate with the Hugging Face Hub; the token is expected in the "hf_token" secret/env var
hf_token = os.getenv("hf_token")
if hf_token:
    login(token=hf_token)
# Global constants and default values
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048
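# MAX_IMAGE_SIZE caps each dimension slider; total pixel count is validated against MAX_IMAGE_SIZE**2.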
PRELOAD_MODELS = False
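# Set PRELOAD_MODELS to True to load both pipelines at startup instead of on first use.
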
# Default system prompt for text generation
DEFAULT_SYSTEM_PROMPT = """You are a product designer with strong knowledge in text-to-image generation. You will receive a product request in the form of a brief description, and your mission will be to imagine a new product design that meets this need.
The deliverable (generated response) will be exclusively a text prompt for the FLUX.1-schnell text-to-image AI.
This prompt should include a visual description of the object explicitly mentioning the essential aspects of its function.
Additionally, you should explicitly mention in this prompt the aesthetic/photo characteristics of the image rendering (e.g., photorealistic, high quality, focal length, grain, etc.), knowing that the image will be the main image of this object in the product catalog. The background of the generated image must be entirely white.
The prompt must not include any narration; it can be long but must not exceed 77 tokens."""
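
# Note: the 77-token cap above roughly corresponds to the CLIP text-encoder limit used by FLUX.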
# Default Flux parameters
DEFAULT_SEED = 42
DEFAULT_RANDOMIZE_SEED = True
DEFAULT_WIDTH = 512
DEFAULT_HEIGHT = 512
DEFAULT_NUM_INFERENCE_STEPS = 6
DEFAULT_GUIDANCE_SCALE = 0.0
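# FLUX.1-schnell is a distilled model; it is typically run with guidance_scale 0 and few steps.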
DEFAULT_TEMPERATURE = 0.9
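
# Lazily-initialized pipelines, cached in module-level globals so repeated calls reuse the loaded models.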
_text_gen_pipeline = None
_image_gen_pipeline = None

@spaces.GPU()
def get_image_gen_pipeline():
    """Load the FLUX.1-schnell image generation pipeline on first use and cache it."""
    global _image_gen_pipeline
    if _image_gen_pipeline is None:
        try:
            device = "cuda" if torch.cuda.is_available() else "cpu"
            dtype = torch.bfloat16
            _image_gen_pipeline = DiffusionPipeline.from_pretrained(
                "black-forest-labs/FLUX.1-schnell",
                torch_dtype=dtype,
            ).to(device)
            # Optional memory optimizations, currently disabled:
            # _image_gen_pipeline.enable_model_cpu_offload()
            # _image_gen_pipeline.enable_vae_slicing()
        except Exception as e:
            print(f"Error loading image generation model: {e}")
            return None
    return _image_gen_pipeline

@spaces.GPU()
def get_text_gen_pipeline():
    """Load the Mistral-7B-Instruct text generation pipeline on first use and cache it."""
    global _text_gen_pipeline
    if _text_gen_pipeline is None:
        try:
            device = "cuda" if torch.cuda.is_available() else "cpu"
            tokenizer = AutoTokenizer.from_pretrained(
                "mistralai/Mistral-7B-Instruct-v0.3",
                use_fast=True
            )
            tokenizer.pad_token = tokenizer.pad_token or tokenizer.eos_token
            _text_gen_pipeline = pipeline(
                "text-generation",
                model="mistralai/Mistral-7B-Instruct-v0.3",
                tokenizer=tokenizer,
                max_new_tokens=2048,
                device=device,
                pad_token_id=tokenizer.pad_token_id
            )
        except Exception as e:
            print(f"Error loading text generation model: {e}")
            return None
    return _text_gen_pipeline

@spaces.GPU()
def refine_prompt(prompt, system_prompt=DEFAULT_SYSTEM_PROMPT, progress=gr.Progress()):
    """Turn a short product description into a detailed FLUX prompt via Mistral.

    Returns a (refined_prompt, status_message) tuple.
    """
    text_gen = get_text_gen_pipeline()
    if text_gen is None:
        return "", "Text generation model is unavailable."
    try:
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": prompt},
        ]
        progress(0, desc="Generating text")
        # Generate the refined prompt
        refined_prompt = text_gen(messages)
        progress(1)

        # Extract the assistant's reply from the chat-formatted pipeline output
        try:
            generated_messages = refined_prompt[0]['generated_text']
            # Keep only the last message with role 'assistant'
            assistant_messages = [msg for msg in generated_messages if msg['role'] == 'assistant']
            if not assistant_messages:
                return "", "Error: No assistant response found"
            assistant_content = assistant_messages[-1]['content']
            # Strip surrounding quotation marks, if any
            if assistant_content.startswith('"') and assistant_content.endswith('"'):
                assistant_content = assistant_content[1:-1]
            return assistant_content, "Prompt refined successfully!"
        except (KeyError, IndexError):
            return "", "Error: Unexpected response format from the model"
    except Exception as e:
        print(f"Error in refine_prompt: {str(e)}")
        return "", f"Error refining prompt: {str(e)}"

def validate_dimensions(width, height):
    if width * height > MAX_IMAGE_SIZE * MAX_IMAGE_SIZE:
        return False, "Image dimensions too large"
    return True, None

@spaces.GPU()
def infer(prompt, seed=DEFAULT_SEED,
          randomize_seed=DEFAULT_RANDOMIZE_SEED,
          width=DEFAULT_WIDTH,
          height=DEFAULT_HEIGHT,
          num_inference_steps=DEFAULT_NUM_INFERENCE_STEPS,
          progress=gr.Progress(track_tqdm=True)):
    """Generate an image from the refined prompt with FLUX.1-schnell.

    Returns a (PIL image, status_message) tuple.
    """
    try:
        # Validate that the prompt is not empty
        if not prompt or prompt.strip() == "":
            return None, "Please provide a valid prompt."

        progress(0.1, desc="Loading model")
        pipe = get_image_gen_pipeline()
        if pipe is None:
            return None, "Image generation model is unavailable."

        is_valid, error_msg = validate_dimensions(width, height)
        if not is_valid:
            return None, error_msg

        if randomize_seed:
            seed = random.randint(0, MAX_SEED)
        # Use the default torch generator rather than a CUDA-specific one
        generator = torch.Generator().manual_seed(seed)

        progress(0.3, desc="Running inference")
        output = pipe(
            prompt=prompt,
            width=width,
            height=height,
            num_inference_steps=num_inference_steps,
            generator=generator,
            guidance_scale=DEFAULT_GUIDANCE_SCALE,
        )

        progress(0.8, desc="Processing output")
        image = output.images[0]
        progress(1.0, desc="Complete")

        return image, f"Image generated successfully with seed {seed}"
    except Exception as e:
        print(f"Error in infer: {str(e)}")
        return None, f"Error generating image: {str(e)}"

# Example prompts (plain strings; the system prompt comes from the UI)
examples = [
    "a backpack for kids, flower style",
    "medieval flip flops",
    "cat shaped cake mold",
]
css="""
#col-container {
margin: 0 auto;
max-width: 520px;
}
"""

def preload_models():
    """Eagerly load both pipelines; returns True only if both loaded successfully."""
    print("Preloading models...")
    text_success = get_text_gen_pipeline() is not None
    image_success = get_image_gen_pipeline() is not None
    success = text_success and image_success
    status = "Models preloaded successfully!" if success else "Error preloading models"
    print(status)
    return success

# Handler for the Examples section: refine the example prompt with Mistral.
# Image generation is left to the dedicated button.
@spaces.GPU()
def process_example_pipeline(example_prompt, system_prompt=DEFAULT_SYSTEM_PROMPT, progress=gr.Progress()):
    progress(0, desc="Starting example processing")

    progress(0.1, desc="Refining prompt with Mistral")
    refined, status = refine_prompt(example_prompt, system_prompt, progress)
    if not refined:
        return "", "Failed to refine prompt: " + status

    return refined, "Prompt refined successfully!"

def create_interface():
    # Preload models if enabled
    if PRELOAD_MODELS:
        models_loaded = preload_models()
        model_status = "✅ Models loaded successfully!" if models_loaded else "⚠️ Error loading models"
    else:
        model_status = "ℹ️ Models will be loaded on demand"

    with gr.Blocks(css=css) as demo:
        gr.Info(model_status)
        with gr.Column(elem_id="col-container"):
            gr.Markdown("# Text to Product\nUsing Mistral-7B-Instruct-v0.3 + FLUX.1-schnell + Trellis")

            prompt = gr.Text(
                show_label=False,
                max_lines=1,
                placeholder="Enter basic object prompt",
                container=False,
            )
            prompt_button = gr.Button("Refine prompt with Mistral")

            refined_prompt = gr.Text(
                show_label=False,
                max_lines=10,
                placeholder="Detailed object prompt",
                container=False,
                max_length=2048,
            )
            visual_button = gr.Button("Create visual with Flux")

            generated_image = gr.Image(show_label=False)
            gen3d_button = gr.Button("Create 3D visual with Trellis")  # not yet wired to a handler

            message_box = gr.Textbox(
                label="Status Messages",
                interactive=False,
                placeholder="Status messages will appear here",
            )
            # Accordion sections for advanced settings
            with gr.Accordion("Advanced Settings", open=False):
                with gr.Tab("Mistral"):
                    # Mistral settings (note: temperature is not currently passed to refine_prompt)
                    temperature = gr.Slider(
                        label="Temperature",
                        value=DEFAULT_TEMPERATURE,
                        minimum=0.0,
                        maximum=1.0,
                        step=0.05,
                        info="Higher values produce more diverse outputs",
                    )
                    system_prompt = gr.Textbox(
                        label="System Prompt",
                        value=DEFAULT_SYSTEM_PROMPT,
                        lines=10,
                        info="Instructions for the Mistral model"
                    )
                with gr.Tab("Flux"):
                    # Flux settings
                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=DEFAULT_SEED)
                    randomize_seed = gr.Checkbox(label="Randomize seed", value=DEFAULT_RANDOMIZE_SEED)
                    with gr.Row():
                        width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_WIDTH)
                        height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_HEIGHT)
                    num_inference_steps = gr.Slider(
                        label="Number of inference steps",
                        minimum=1,
                        maximum=50,
                        step=1,
                        value=DEFAULT_NUM_INFERENCE_STEPS,
                    )
            # Examples section: clicking an example only refines the prompt
            gr.Examples(
                examples=examples,
                fn=process_example_pipeline,
                inputs=[prompt],
                outputs=[refined_prompt, message_box],
                cache_examples=True,
            )

        # Event handlers
        gr.on(
            triggers=[prompt_button.click, prompt.submit],
            fn=refine_prompt,
            inputs=[prompt, system_prompt],
            outputs=[refined_prompt, message_box]
        )
        gr.on(
            triggers=[visual_button.click],
            fn=infer,
            inputs=[refined_prompt, seed, randomize_seed, width, height, num_inference_steps],
            outputs=[generated_image, message_box]
        )

    return demo
if __name__ == "__main__":
demo = create_interface()
demo.launch()