import gradio as gr
import numpy as np
import random
import os
import spaces
import torch
from diffusers import DiffusionPipeline
from transformers import pipeline, AutoTokenizer
from huggingface_hub import login
from PIL import Image
hf_token = os.getenv("hf_token")
if hf_token:
    login(token=hf_token)
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048
PRELOAD_MODELS = False # Easy switch for preloading
_text_gen_pipeline = None
_image_gen_pipeline = None
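# Both pipelines are created lazily on first use and cached in the module-level
# globals above, so repeated Gradio callbacks reuse the already-loaded models.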
@spaces.GPU()
def get_image_gen_pipeline():
    global _image_gen_pipeline
    if _image_gen_pipeline is None:
        try:
            device = "cuda" if torch.cuda.is_available() else "cpu"
            dtype = torch.bfloat16
            _image_gen_pipeline = DiffusionPipeline.from_pretrained(
                "black-forest-labs/FLUX.1-schnell",
                torch_dtype=dtype,
            ).to(device)
            # Comment these out for now to match the working example
            # _image_gen_pipeline.enable_model_cpu_offload()
            # _image_gen_pipeline.enable_vae_slicing()
        except Exception as e:
            print(f"Error loading image generation model: {e}")
            return None
    return _image_gen_pipeline

@spaces.GPU()
def get_text_gen_pipeline():
    global _text_gen_pipeline
    if _text_gen_pipeline is None:
        try:
            device = "cuda" if torch.cuda.is_available() else "cpu"
            tokenizer = AutoTokenizer.from_pretrained(
                "mistralai/Mistral-7B-Instruct-v0.3",
                use_fast=True
            )
            tokenizer.pad_token = tokenizer.pad_token or tokenizer.eos_token
            _text_gen_pipeline = pipeline(
                "text-generation",
                model="mistralai/Mistral-7B-Instruct-v0.3",
                tokenizer=tokenizer,
                max_new_tokens=2048,
                device=device,
                pad_token_id=tokenizer.pad_token_id
            )
        except Exception as e:
            print(f"Error loading text generation model: {e}")
            return None
    return _text_gen_pipeline

# Default system prompt for text generation
DEFAULT_SYSTEM_PROMPT = """You are a product designer with strong knowledge of text-to-image generation. You will receive a product request as a short description, and your task is to imagine a new product design that answers this need.
The deliverable (generated answer) must consist solely of a prompt for the FLUX.1-schnell text-to-image model.
This prompt must include a visual description of the object that explicitly mentions the essential aspects of its function.
Alongside that, the prompt must also explicitly state the aesthetic/photographic characteristics of the rendered image (e.g. photorealistic, high quality, focal length, grain, etc.), keeping in mind that the image will be the main picture of this object in the product catalogue. The background of the generated image must be entirely white.
The prompt must contain no narration; it may be long but must not exceed 77 tokens."""
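# Note: the 77-token cap in the prompt above mirrors the 77-token context of
# CLIP-style text encoders; longer prompts may be truncated by the image model.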
@spaces.GPU()
def refine_prompt(prompt, system_prompt=DEFAULT_SYSTEM_PROMPT, progress=gr.Progress()):
    text_gen = get_text_gen_pipeline()
    if text_gen is None:
        return "", "Text generation model is unavailable."
    try:
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": prompt},
        ]
        # Indicate progress started
        progress(0, desc="Generating text")
        # Generate text
        refined_prompt = text_gen(messages)
        # Indicate progress complete
        progress(1)
        # Extract just the assistant's content from the response
        try:
            messages = refined_prompt[0]['generated_text']
            # Find the last message with role 'assistant'
            assistant_messages = [msg for msg in messages if msg['role'] == 'assistant']
            if not assistant_messages:
                return "", "Error: No assistant response found"
            assistant_content = assistant_messages[-1]['content']
            return assistant_content, "Prompt refined successfully!"
        except (KeyError, IndexError):
            return "", "Error: Unexpected response format from the model"
    except Exception as e:
        print(f"Error in refine_prompt: {str(e)}")  # Debug print
        return "", f"Error refining prompt: {str(e)}"

def validate_dimensions(width, height):
    if width * height > MAX_IMAGE_SIZE * MAX_IMAGE_SIZE:
        return False, "Image dimensions too large"
    return True, None

@spaces.GPU()
def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, progress=gr.Progress(track_tqdm=True)):
    try:
        # Validate that prompt is not empty
        if not prompt or prompt.strip() == "":
            return None, "Please provide a valid prompt."
        pipe = get_image_gen_pipeline()
        if pipe is None:
            return None, "Image generation model is unavailable."
        is_valid, error_msg = validate_dimensions(width, height)
        if not is_valid:
            return None, error_msg
        if randomize_seed:
            seed = random.randint(0, MAX_SEED)
        # Use default torch generator instead of cuda-specific generator
        generator = torch.Generator().manual_seed(seed)
        output = pipe(
            prompt=prompt,
            width=width,
            height=height,
            num_inference_steps=num_inference_steps,
            generator=generator,
            guidance_scale=0.0,  # FLUX.1-schnell is guidance-distilled and expects no classifier-free guidance
        )
        image = output.images[0]
        return image, f"Image generated successfully with seed {seed}"
    except Exception as e:
        print(f"Error in infer: {str(e)}")
        return None, f"Error generating image: {str(e)}"

# Example prompts; the remaining generation parameters come from the UI controls
examples = [
    "a backpack for kids, flower style",
    "medieval flip flops",
    "cat shaped cake mold",
]

css="""
#col-container {
margin: 0 auto;
max-width: 520px;
}
"""
def preload_models():
    global _text_gen_pipeline, _image_gen_pipeline
    print("Preloading models...")
    success = True
    try:
        _text_gen_pipeline = get_text_gen_pipeline()
        if _text_gen_pipeline is None:
            success = False
    except Exception as e:
        print(f"Error preloading text generation model: {str(e)}")
        success = False
    try:
        _image_gen_pipeline = get_image_gen_pipeline()
        if _image_gen_pipeline is None:
            success = False
    except Exception as e:
        print(f"Error preloading image generation model: {str(e)}")
        success = False
    status = "Models preloaded successfully!" if success else "Error preloading models"
    print(status)
    return success

# Combined handler used by the Examples section: takes an example prompt and
# refines it with Mistral. It deliberately does not generate an image;
# image generation stays a separate, explicit step in the UI.
@spaces.GPU()
def process_example_pipeline(example_prompt, system_prompt=DEFAULT_SYSTEM_PROMPT, progress=gr.Progress()):
    # Step 1: Update status
    progress(0, desc="Starting example processing")
    progress_status = "Selected example: " + example_prompt
    # Step 2: Refine the prompt
    progress(0.1, desc="Refining prompt with Mistral")
    refined, status = refine_prompt(example_prompt, system_prompt, progress)
    if not refined:
        return example_prompt, "", "Failed to refine prompt: " + status
    # Return only the refined prompt and status
    return example_prompt, refined, "Prompt refined successfully!"

def create_interface():
    # Preload models if needed
    if PRELOAD_MODELS:
        models_loaded = preload_models()
        model_status = "✅ Models loaded successfully!" if models_loaded else "⚠️ Error loading models"
    else:
        model_status = "ℹ️ Models will be loaded on demand"

    with gr.Blocks(css=css) as demo:
        gr.Info(model_status)
        with gr.Column(elem_id="col-container"):
gr.Markdown("# Text to Product\nUsing Mistral-7B-Instruct-v0.3 + FLUX.1-dev + Trellis")
            # Basic inputs
            with gr.Row():
                prompt = gr.Text(
                    show_label=False,
                    max_lines=1,
                    placeholder="Enter basic object prompt",
                    container=False,
                )
                prompt_button = gr.Button("Refine prompt with Mistral")
            refined_prompt = gr.Text(
                show_label=False,
                max_lines=10,
                placeholder="Detailed object prompt",
                container=False,
                max_length=2048,
            )
            visual_button = gr.Button("Create visual with Flux")
            generated_image = gr.Image(show_label=False)
            error_box = gr.Textbox(
                label="Status Messages",
                interactive=False,
                placeholder="Status messages will appear here",
            )

            # Accordion sections for advanced settings
            with gr.Accordion("Advanced Settings", open=False):
                with gr.Tab("Mistral"):
                    # Mistral settings (note: the temperature slider is not yet
                    # passed to refine_prompt, so changing it has no effect)
                    temperature = gr.Slider(
                        label="Temperature",
                        value=0.9,
                        minimum=0.0,
                        maximum=1.0,
                        step=0.05,
                        info="Higher values produce more diverse outputs",
                    )
                    system_prompt = gr.Textbox(
                        label="System Prompt",
                        value=DEFAULT_SYSTEM_PROMPT,
                        lines=10,
                        info="Instructions for the Mistral model"
                    )
with gr.Tab("Flux"):
# Flux settings
seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
with gr.Row():
width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=512)
height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=512)
num_inference_steps = gr.Slider(
label="Number of inference steps",
minimum=1,
maximum=50,
step=1,
value=6,
)
            # Examples section: selecting an example only refines the prompt,
            # it does not generate an image
            gr.Examples(
                examples=examples,  # a plain list of prompt strings
                fn=process_example_pipeline,
                inputs=[prompt, system_prompt],
                outputs=[prompt, refined_prompt, error_box],
                cache_examples=True,
            )
        # Event handlers
        gr.on(
            triggers=[prompt_button.click, prompt.submit],
            fn=refine_prompt,
            inputs=[prompt, system_prompt],
            outputs=[refined_prompt, error_box]
        )
        gr.on(
            triggers=[visual_button.click],
            fn=infer,
            inputs=[refined_prompt, seed, randomize_seed, width, height, num_inference_steps],
            outputs=[generated_image, error_box]
        )

    return demo

if __name__ == "__main__":
    demo = create_interface()
    demo.launch()
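# Minimal sketch of driving the two steps without the Gradio UI (assumes the
# same model access and GPU availability as the Space itself):
#
#   refined, status = refine_prompt("cat shaped cake mold")
#   image, status = infer(refined, randomize_seed=True, width=512, height=512)
#   image.save("cake_mold.png")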