lionelgarnier committed · Commit 5428aaf · 1 Parent(s): 3114c99

cursor changes

Files changed (1):
  1. app.py +47 -21
app.py CHANGED
@@ -21,29 +21,49 @@ MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 2048
 
 def refine_prompt(prompt):
-    chatbot = pipeline("text-generation", model="mistralai/Mistral-7B-Instruct-v0.3", max_new_tokens=2048, device=device)
-    messages = [
-        {"role": "system", "content": "You are a product designer. You will get a basic prompt of product request and you need to imagine a new product design to satisfy that need. Produce an extended description of product front view that will be use by Flux to generate a visual"},
-        {"role": "user", "content": prompt},
-    ]
-    refined_prompt = chatbot(messages)
-    return refined_prompt
+    try:
+        chatbot = pipeline("text-generation", model="mistralai/Mistral-7B-Instruct-v0.3", max_new_tokens=2048, device=device)
+        messages = [
+            {"role": "system", "content": "You are a product designer. You will get a basic prompt of product request and you need to imagine a new product design to satisfy that need. Produce an extended description of product front view that will be use by Flux to generate a visual"},
+            {"role": "user", "content": prompt},
+        ]
+        refined_prompt = chatbot(messages)
+        return refined_prompt
+    except Exception as e:
+        return f"Error refining prompt: {str(e)}"
+
+def validate_dimensions(width, height):
+    if width * height > MAX_IMAGE_SIZE * MAX_IMAGE_SIZE:
+        return False, "Image dimensions too large"
+    return True, None
 
 @spaces.GPU()
 def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, progress=gr.Progress(track_tqdm=True)):
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-    generator = torch.Generator().manual_seed(seed)
-    image = pipe(
-        prompt = prompt,
-        width = width,
-        height = height,
-        num_inference_steps = num_inference_steps,
-        generator = generator,
-        guidance_scale=0.0,
-        max_sequence_length=2048
-    ).images[0]
-    return image, seed
+    try:
+        progress(0, desc="Starting generation...")
+
+        if randomize_seed:
+            seed = random.randint(0, MAX_SEED)
+
+        progress(0.2, desc="Setting up generator...")
+        generator = torch.Generator().manual_seed(seed)
+
+        progress(0.4, desc="Generating image...")
+        with torch.cuda.amp.autocast():
+            image = pipe(
+                prompt = prompt,
+                width = width,
+                height = height,
+                num_inference_steps = num_inference_steps,
+                generator = generator,
+                guidance_scale=0.0,
+                max_sequence_length=2048
+            ).images[0]
+
+        progress(1.0, desc="Done!")
+        return image, seed
+    except Exception as e:
+        return None, f"Error generating image: {str(e)}"
 
 examples = [
     "a tiny astronaut hatching from an egg on the moon",
@@ -60,7 +80,13 @@ css="""
 
 with gr.Blocks(css=css) as demo:
 
-    info = gr.Info("...")
+    info = gr.Info("Loading models... Please wait.")
+
+    try:
+        pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype).to(device)
+        info.value = "Models loaded successfully!"
+    except Exception as e:
+        info.value = f"Error loading models: {str(e)}"
 
     with gr.Column(elem_id="col-container"):
         gr.Markdown(f"""# Text to Product
 
 